2024-12-07 18:18:57,278 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-07 18:18:57,294 main DEBUG Took 0.013611 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-07 18:18:57,294 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-07 18:18:57,295 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-07 18:18:57,296 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-07 18:18:57,297 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,305 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-07 18:18:57,320 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,321 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,322 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,323 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,323 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,324 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,325 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,325 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,326 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,327 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,328 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,328 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,329 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,329 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-07 18:18:57,330 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,330 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,331 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,332 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,332 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,333 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,333 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,334 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,334 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,335 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 18:18:57,336 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,336 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-07 18:18:57,338 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 18:18:57,340 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-07 18:18:57,342 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-07 18:18:57,342 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-07 18:18:57,343 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-07 18:18:57,344 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-07 18:18:57,352 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-07 18:18:57,354 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-07 18:18:57,356 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-07 18:18:57,356 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-07 18:18:57,356 main DEBUG createAppenders(={Console}) 2024-12-07 18:18:57,357 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-07 18:18:57,357 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-07 18:18:57,358 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-07 18:18:57,358 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-07 18:18:57,358 main DEBUG OutputStream closed 2024-12-07 18:18:57,359 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-07 18:18:57,359 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-07 18:18:57,359 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-07 18:18:57,441 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-07 18:18:57,443 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-07 18:18:57,445 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-07 18:18:57,446 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-07 18:18:57,447 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-07 18:18:57,447 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-07 18:18:57,448 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-07 18:18:57,448 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-07 18:18:57,448 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-07 18:18:57,449 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-07 18:18:57,449 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-07 18:18:57,450 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-07 18:18:57,451 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-07 18:18:57,451 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-07 18:18:57,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-07 18:18:57,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-07 18:18:57,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-07 18:18:57,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-07 18:18:57,455 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07 18:18:57,456 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-07 18:18:57,456 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-07 18:18:57,457 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-07T18:18:57,689 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62 2024-12-07 18:18:57,692 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-07 18:18:57,692 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-07T18:18:57,701 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-07T18:18:57,719 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T18:18:57,722 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/cluster_89eff866-5171-1256-88aa-0954626babf5, deleteOnExit=true 2024-12-07T18:18:57,723 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-07T18:18:57,723 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/test.cache.data in system properties and HBase conf 2024-12-07T18:18:57,724 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T18:18:57,724 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/hadoop.log.dir in system properties and HBase conf 2024-12-07T18:18:57,725 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T18:18:57,725 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T18:18:57,726 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-07T18:18:57,838 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-07T18:18:57,986 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T18:18:57,992 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T18:18:57,992 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T18:18:57,993 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T18:18:57,994 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T18:18:57,994 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T18:18:57,995 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T18:18:57,996 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T18:18:57,996 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T18:18:57,997 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T18:18:57,997 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/nfs.dump.dir in system properties and HBase conf 2024-12-07T18:18:57,998 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/java.io.tmpdir in system properties and HBase conf 2024-12-07T18:18:57,999 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T18:18:57,999 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T18:18:58,000 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T18:18:58,900 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-07T18:18:58,988 INFO [Time-limited test {}] log.Log(170): Logging initialized @2477ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-07T18:18:59,083 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T18:18:59,172 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T18:18:59,195 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T18:18:59,195 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T18:18:59,196 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T18:18:59,210 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T18:18:59,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@106ffc0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/hadoop.log.dir/,AVAILABLE} 2024-12-07T18:18:59,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@704acb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T18:18:59,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6904431c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/java.io.tmpdir/jetty-localhost-41713-hadoop-hdfs-3_4_1-tests_jar-_-any-2018444138955102795/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T18:18:59,418 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20178447{HTTP/1.1, (http/1.1)}{localhost:41713} 2024-12-07T18:18:59,419 INFO [Time-limited test {}] server.Server(415): Started @2909ms 2024-12-07T18:18:59,841 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T18:18:59,850 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T18:18:59,852 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T18:18:59,852 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T18:18:59,852 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T18:18:59,853 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ac85cee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/hadoop.log.dir/,AVAILABLE} 2024-12-07T18:18:59,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74536f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T18:18:59,978 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29607158{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/java.io.tmpdir/jetty-localhost-38013-hadoop-hdfs-3_4_1-tests_jar-_-any-4993640597168748388/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T18:18:59,979 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@76b7aca8{HTTP/1.1, (http/1.1)}{localhost:38013} 2024-12-07T18:18:59,979 INFO [Time-limited test {}] server.Server(415): Started @3470ms 2024-12-07T18:19:00,038 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T18:19:00,512 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/cluster_89eff866-5171-1256-88aa-0954626babf5/dfs/data/data2/current/BP-737318815-172.17.0.2-1733595538639/current, will proceed with Du for space computation calculation, 2024-12-07T18:19:00,512 WARN [Thread-71 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/cluster_89eff866-5171-1256-88aa-0954626babf5/dfs/data/data1/current/BP-737318815-172.17.0.2-1733595538639/current, will proceed with Du for space computation calculation, 2024-12-07T18:19:00,566 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T18:19:00,621 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd0223ab753ad00cb with lease ID 0x95c43c5e463d0f22: Processing first storage report for DS-8a24733a-4165-42d5-bf67-f60099fe6e42 from datanode DatanodeRegistration(127.0.0.1:33311, datanodeUuid=9fda8575-3961-4bd1-9383-11e803969db3, infoPort=42557, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=1898204670;c=1733595538639) 2024-12-07T18:19:00,622 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd0223ab753ad00cb with lease ID 0x95c43c5e463d0f22: from storage DS-8a24733a-4165-42d5-bf67-f60099fe6e42 node DatanodeRegistration(127.0.0.1:33311, datanodeUuid=9fda8575-3961-4bd1-9383-11e803969db3, infoPort=42557, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=1898204670;c=1733595538639), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T18:19:00,622 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd0223ab753ad00cb with lease ID 0x95c43c5e463d0f22: Processing first storage report for DS-43389934-886d-4c3c-8272-3c346b420634 from datanode DatanodeRegistration(127.0.0.1:33311, datanodeUuid=9fda8575-3961-4bd1-9383-11e803969db3, infoPort=42557, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=1898204670;c=1733595538639) 2024-12-07T18:19:00,622 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd0223ab753ad00cb with lease ID 0x95c43c5e463d0f22: from storage DS-43389934-886d-4c3c-8272-3c346b420634 node DatanodeRegistration(127.0.0.1:33311, datanodeUuid=9fda8575-3961-4bd1-9383-11e803969db3, infoPort=42557, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=1898204670;c=1733595538639), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T18:19:00,642 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62 
2024-12-07T18:19:00,725 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/cluster_89eff866-5171-1256-88aa-0954626babf5/zookeeper_0, clientPort=56016, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/cluster_89eff866-5171-1256-88aa-0954626babf5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/cluster_89eff866-5171-1256-88aa-0954626babf5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T18:19:00,735 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=56016 2024-12-07T18:19:00,748 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T18:19:00,751 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T18:19:01,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741825_1001 (size=7) 2024-12-07T18:19:01,416 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7 with version=8 2024-12-07T18:19:01,417 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/hbase-staging 2024-12-07T18:19:01,551 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-07T18:19:01,833 INFO [Time-limited test {}] client.ConnectionUtils(129): master/8a7a030b35db:0 server-side Connection retries=45 2024-12-07T18:19:01,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T18:19:01,854 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T18:19:01,854 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T18:19:01,854 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T18:19:01,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T18:19:01,995 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T18:19:02,058 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-07T18:19:02,068 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-07T18:19:02,072 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T18:19:02,101 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 7464 (auto-detected) 2024-12-07T18:19:02,103 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-07T18:19:02,123 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35545 2024-12-07T18:19:02,131 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T18:19:02,134 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T18:19:02,148 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35545 connecting to ZooKeeper ensemble=127.0.0.1:56016 2024-12-07T18:19:02,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:355450x0, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T18:19:02,182 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35545-0x1006db465c50000 connected 2024-12-07T18:19:02,215 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T18:19:02,219 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T18:19:02,222 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T18:19:02,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35545 2024-12-07T18:19:02,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35545 2024-12-07T18:19:02,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35545 2024-12-07T18:19:02,231 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35545 2024-12-07T18:19:02,231 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35545 
2024-12-07T18:19:02,241 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7, hbase.cluster.distributed=false 2024-12-07T18:19:02,336 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/8a7a030b35db:0 server-side Connection retries=45 2024-12-07T18:19:02,337 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T18:19:02,337 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T18:19:02,337 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T18:19:02,337 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T18:19:02,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T18:19:02,341 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T18:19:02,344 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T18:19:02,346 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45237 2024-12-07T18:19:02,348 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T18:19:02,358 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T18:19:02,360 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T18:19:02,364 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T18:19:02,369 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45237 connecting to ZooKeeper ensemble=127.0.0.1:56016 2024-12-07T18:19:02,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:452370x0, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T18:19:02,374 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452370x0, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T18:19:02,376 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452370x0, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T18:19:02,376 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKWatcher(635): regionserver:45237-0x1006db465c50001 connected 2024-12-07T18:19:02,377 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T18:19:02,381 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45237 2024-12-07T18:19:02,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45237 2024-12-07T18:19:02,386 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45237 2024-12-07T18:19:02,387 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45237 2024-12-07T18:19:02,387 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45237 2024-12-07T18:19:02,390 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/8a7a030b35db,35545,1733595541544 2024-12-07T18:19:02,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T18:19:02,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T18:19:02,400 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8a7a030b35db,35545,1733595541544 2024-12-07T18:19:02,411 DEBUG [M:0;8a7a030b35db:35545 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8a7a030b35db:35545 2024-12-07T18:19:02,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T18:19:02,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T18:19:02,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:02,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:02,424 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T18:19:02,426 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] 
master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/8a7a030b35db,35545,1733595541544 from backup master directory 2024-12-07T18:19:02,426 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T18:19:02,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/8a7a030b35db,35545,1733595541544 2024-12-07T18:19:02,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T18:19:02,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T18:19:02,431 WARN [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T18:19:02,431 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8a7a030b35db,35545,1733595541544 2024-12-07T18:19:02,433 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-07T18:19:02,435 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-07T18:19:02,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741826_1002 (size=42) 2024-12-07T18:19:02,913 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/hbase.id with ID: d564c94e-3818-4df3-9d8b-a70886887e68 2024-12-07T18:19:02,961 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T18:19:02,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:02,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:03,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741827_1003 (size=196) 2024-12-07T18:19:03,035 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T18:19:03,037 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T18:19:03,059 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:03,065 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T18:19:03,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741828_1004 (size=1189) 2024-12-07T18:19:03,520 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store 2024-12-07T18:19:03,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741829_1005 (size=34) 2024-12-07T18:19:03,541 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-07T18:19:03,541 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:19:03,542 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T18:19:03,542 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T18:19:03,542 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T18:19:03,543 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-12-07T18:19:03,543 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T18:19:03,543 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T18:19:03,543 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-07T18:19:03,545 WARN [master/8a7a030b35db:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/.initializing 2024-12-07T18:19:03,545 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/WALs/8a7a030b35db,35545,1733595541544 2024-12-07T18:19:03,551 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T18:19:03,562 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8a7a030b35db%2C35545%2C1733595541544, suffix=, logDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/WALs/8a7a030b35db,35545,1733595541544, archiveDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/oldWALs, maxLogs=10 2024-12-07T18:19:03,586 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/WALs/8a7a030b35db,35545,1733595541544/8a7a030b35db%2C35545%2C1733595541544.1733595543568, exclude list is [], retry=0 2024-12-07T18:19:03,603 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33311,DS-8a24733a-4165-42d5-bf67-f60099fe6e42,DISK] 2024-12-07T18:19:03,606 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-07T18:19:03,649 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/WALs/8a7a030b35db,35545,1733595541544/8a7a030b35db%2C35545%2C1733595541544.1733595543568 2024-12-07T18:19:03,650 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42557:42557)] 2024-12-07T18:19:03,650 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:19:03,651 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:19:03,656 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T18:19:03,657 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T18:19:03,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T18:19:03,728 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T18:19:03,732 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:03,735 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T18:19:03,735 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T18:19:03,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T18:19:03,739 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:03,740 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:03,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T18:19:03,743 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T18:19:03,743 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:03,744 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:03,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T18:19:03,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T18:19:03,747 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:03,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:03,752 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T18:19:03,753 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T18:19:03,762 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T18:19:03,766 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T18:19:03,770 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T18:19:03,771 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65196898, jitterRate=-0.028490513563156128}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T18:19:03,775 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-07T18:19:03,776 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T18:19:03,806 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dd3d717, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:03,840 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
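[editor's note] The FlushLargeStoresPolicy line above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound was not set in the master:store table descriptor, so the policy fell back to the region memstore flush size divided by the number of families (32.0 M). A minimal, hedged Java sketch of how that property could be supplied through a table descriptor follows; the table name, column family, and the 64 MB value are illustrative assumptions, not values taken from this test run.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Property key copied verbatim from the log line; 64 MB is an arbitrary example value.
    String key = "hbase.hregion.percolumnfamilyflush.size.lower.bound";
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))    // assumed table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue(key, String.valueOf(64L * 1024 * 1024))  // 64 MB lower bound per family
        .build();
    System.out.println(key + " = " + td.getValue(key));
  }
}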
2024-12-07T18:19:03,852 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T18:19:03,852 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T18:19:03,854 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T18:19:03,856 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-07T18:19:03,861 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-07T18:19:03,861 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T18:19:03,888 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T18:19:03,900 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T18:19:03,905 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-07T18:19:03,908 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T18:19:03,909 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T18:19:03,911 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-07T18:19:03,914 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T18:19:03,918 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T18:19:03,920 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-07T18:19:03,921 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T18:19:03,923 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T18:19:03,932 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T18:19:03,935 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T18:19:03,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T18:19:03,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T18:19:03,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:03,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:03,940 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=8a7a030b35db,35545,1733595541544, sessionid=0x1006db465c50000, setting cluster-up flag (Was=false) 2024-12-07T18:19:03,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:03,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:03,959 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T18:19:03,960 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8a7a030b35db,35545,1733595541544 2024-12-07T18:19:03,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:03,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:03,972 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T18:19:03,974 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8a7a030b35db,35545,1733595541544 2024-12-07T18:19:04,003 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;8a7a030b35db:45237 2024-12-07T18:19:04,005 INFO 
[RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1008): ClusterId : d564c94e-3818-4df3-9d8b-a70886887e68 2024-12-07T18:19:04,007 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T18:19:04,012 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T18:19:04,013 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T18:19:04,016 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T18:19:04,017 DEBUG [RS:0;8a7a030b35db:45237 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58652bd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:04,018 DEBUG [RS:0;8a7a030b35db:45237 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ecc01f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8a7a030b35db/172.17.0.2:0 2024-12-07T18:19:04,022 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-07T18:19:04,022 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-07T18:19:04,022 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-07T18:19:04,024 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(3073): reportForDuty to master=8a7a030b35db,35545,1733595541544 with isa=8a7a030b35db/172.17.0.2:45237, startcode=1733595542335 2024-12-07T18:19:04,036 DEBUG [RS:0;8a7a030b35db:45237 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T18:19:04,073 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-07T18:19:04,079 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57587, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T18:19:04,083 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-07T18:19:04,085 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35545 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:04,087 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T18:19:04,094 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 8a7a030b35db,35545,1733595541544 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T18:19:04,098 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/8a7a030b35db:0, corePoolSize=5, maxPoolSize=5 2024-12-07T18:19:04,098 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/8a7a030b35db:0, corePoolSize=5, maxPoolSize=5 2024-12-07T18:19:04,099 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/8a7a030b35db:0, corePoolSize=5, maxPoolSize=5 2024-12-07T18:19:04,099 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/8a7a030b35db:0, corePoolSize=5, maxPoolSize=5 2024-12-07T18:19:04,099 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/8a7a030b35db:0, corePoolSize=10, maxPoolSize=10 2024-12-07T18:19:04,099 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,100 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/8a7a030b35db:0, corePoolSize=2, maxPoolSize=2 2024-12-07T18:19:04,100 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,102 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; 
org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733595574101 2024-12-07T18:19:04,104 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T18:19:04,105 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T18:19:04,106 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-07T18:19:04,106 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-07T18:19:04,110 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T18:19:04,110 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:04,110 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T18:19:04,110 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T18:19:04,111 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T18:19:04,111 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T18:19:04,112 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-07T18:19:04,113 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T18:19:04,115 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T18:19:04,115 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T18:19:04,116 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-07T18:19:04,116 WARN [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-07T18:19:04,118 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T18:19:04,118 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T18:19:04,120 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/8a7a030b35db:0:becomeActiveMaster-HFileCleaner.large.0-1733595544120,5,FailOnTimeoutGroup] 2024-12-07T18:19:04,120 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/8a7a030b35db:0:becomeActiveMaster-HFileCleaner.small.0-1733595544120,5,FailOnTimeoutGroup] 2024-12-07T18:19:04,120 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:04,121 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T18:19:04,122 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:04,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741831_1007 (size=1039) 2024-12-07T18:19:04,122 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
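[editor's note] The HMaster line above states that reopening regions with a very high storeFileRefCount stays disabled unless hbase.regions.recovery.store.file.ref.count is given a threshold greater than 0. A short, hedged sketch of enabling it programmatically follows; the threshold of 3 is only an illustration, and setting the same property in hbase-site.xml would serve the same purpose.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key copied verbatim from the log; per the message above, any value > 0 enables the feature.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}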
2024-12-07T18:19:04,218 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(3073): reportForDuty to master=8a7a030b35db,35545,1733595541544 with isa=8a7a030b35db/172.17.0.2:45237, startcode=1733595542335 2024-12-07T18:19:04,220 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35545 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:04,222 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35545 {}] master.ServerManager(486): Registering regionserver=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:04,231 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7 2024-12-07T18:19:04,231 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:39489 2024-12-07T18:19:04,231 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-07T18:19:04,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T18:19:04,237 DEBUG [RS:0;8a7a030b35db:45237 {}] zookeeper.ZKUtil(111): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8a7a030b35db,45237,1733595542335 2024-12-07T18:19:04,237 WARN [RS:0;8a7a030b35db:45237 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T18:19:04,237 INFO [RS:0;8a7a030b35db:45237 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T18:19:04,237 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/WALs/8a7a030b35db,45237,1733595542335 2024-12-07T18:19:04,239 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8a7a030b35db,45237,1733595542335] 2024-12-07T18:19:04,253 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-07T18:19:04,267 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T18:19:04,281 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T18:19:04,284 INFO [RS:0;8a7a030b35db:45237 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T18:19:04,285 INFO [RS:0;8a7a030b35db:45237 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T18:19:04,285 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-07T18:19:04,292 INFO [RS:0;8a7a030b35db:45237 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:04,292 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,292 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,292 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,292 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,293 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,293 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8a7a030b35db:0, corePoolSize=2, maxPoolSize=2 2024-12-07T18:19:04,293 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,293 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,294 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,294 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,294 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8a7a030b35db:0, corePoolSize=1, maxPoolSize=1 2024-12-07T18:19:04,294 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8a7a030b35db:0, corePoolSize=3, maxPoolSize=3 2024-12-07T18:19:04,294 DEBUG [RS:0;8a7a030b35db:45237 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0, corePoolSize=3, maxPoolSize=3 2024-12-07T18:19:04,295 INFO [RS:0;8a7a030b35db:45237 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:04,295 INFO [RS:0;8a7a030b35db:45237 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:04,296 INFO [RS:0;8a7a030b35db:45237 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-07T18:19:04,296 INFO [RS:0;8a7a030b35db:45237 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:04,296 INFO [RS:0;8a7a030b35db:45237 {}] hbase.ChoreService(168): Chore ScheduledChore name=8a7a030b35db,45237,1733595542335-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T18:19:04,317 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T18:19:04,319 INFO [RS:0;8a7a030b35db:45237 {}] hbase.ChoreService(168): Chore ScheduledChore name=8a7a030b35db,45237,1733595542335-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:04,338 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.Replication(204): 8a7a030b35db,45237,1733595542335 started 2024-12-07T18:19:04,338 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1767): Serving as 8a7a030b35db,45237,1733595542335, RpcServer on 8a7a030b35db/172.17.0.2:45237, sessionid=0x1006db465c50001 2024-12-07T18:19:04,339 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T18:19:04,339 DEBUG [RS:0;8a7a030b35db:45237 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:04,339 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8a7a030b35db,45237,1733595542335' 2024-12-07T18:19:04,339 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T18:19:04,340 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T18:19:04,341 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T18:19:04,341 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T18:19:04,341 DEBUG [RS:0;8a7a030b35db:45237 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:04,341 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8a7a030b35db,45237,1733595542335' 2024-12-07T18:19:04,341 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T18:19:04,342 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T18:19:04,342 DEBUG [RS:0;8a7a030b35db:45237 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T18:19:04,343 INFO [RS:0;8a7a030b35db:45237 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T18:19:04,343 INFO [RS:0;8a7a030b35db:45237 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-07T18:19:04,448 INFO [RS:0;8a7a030b35db:45237 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T18:19:04,452 INFO [RS:0;8a7a030b35db:45237 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8a7a030b35db%2C45237%2C1733595542335, suffix=, logDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/WALs/8a7a030b35db,45237,1733595542335, archiveDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/oldWALs, maxLogs=32 2024-12-07T18:19:04,470 DEBUG [RS:0;8a7a030b35db:45237 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/WALs/8a7a030b35db,45237,1733595542335/8a7a030b35db%2C45237%2C1733595542335.1733595544455, exclude list is [], retry=0 2024-12-07T18:19:04,474 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33311,DS-8a24733a-4165-42d5-bf67-f60099fe6e42,DISK] 2024-12-07T18:19:04,478 INFO [RS:0;8a7a030b35db:45237 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/WALs/8a7a030b35db,45237,1733595542335/8a7a030b35db%2C45237%2C1733595542335.1733595544455 2024-12-07T18:19:04,479 DEBUG [RS:0;8a7a030b35db:45237 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42557:42557)] 2024-12-07T18:19:04,525 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-07T18:19:04,525 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7 2024-12-07T18:19:04,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741833_1009 (size=32) 2024-12-07T18:19:04,937 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:19:04,940 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T18:19:04,943 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T18:19:04,943 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:04,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T18:19:04,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T18:19:04,948 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T18:19:04,948 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:04,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T18:19:04,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T18:19:04,951 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T18:19:04,951 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:04,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T18:19:04,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740 2024-12-07T18:19:04,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740 2024-12-07T18:19:04,959 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-07T18:19:04,962 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-07T18:19:04,967 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T18:19:04,968 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69312404, jitterRate=0.032835304737091064}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T18:19:04,971 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-07T18:19:04,971 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-07T18:19:04,971 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-07T18:19:04,971 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-07T18:19:04,971 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T18:19:04,972 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T18:19:04,973 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-07T18:19:04,973 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-07T18:19:04,976 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-07T18:19:04,976 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-07T18:19:04,983 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T18:19:04,992 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T18:19:04,994 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T18:19:05,146 DEBUG [8a7a030b35db:35545 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T18:19:05,151 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:05,156 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8a7a030b35db,45237,1733595542335, state=OPENING 2024-12-07T18:19:05,161 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T18:19:05,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:05,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:05,164 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T18:19:05,164 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T18:19:05,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:19:05,341 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:05,342 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T18:19:05,346 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52062, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T18:19:05,357 INFO [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-07T18:19:05,357 INFO [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T18:19:05,358 INFO [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-07T18:19:05,361 INFO [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8a7a030b35db%2C45237%2C1733595542335.meta, suffix=.meta, logDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/WALs/8a7a030b35db,45237,1733595542335, archiveDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/oldWALs, maxLogs=32 2024-12-07T18:19:05,378 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/WALs/8a7a030b35db,45237,1733595542335/8a7a030b35db%2C45237%2C1733595542335.meta.1733595545363.meta, exclude list is [], retry=0 2024-12-07T18:19:05,383 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33311,DS-8a24733a-4165-42d5-bf67-f60099fe6e42,DISK] 2024-12-07T18:19:05,386 INFO [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/WALs/8a7a030b35db,45237,1733595542335/8a7a030b35db%2C45237%2C1733595542335.meta.1733595545363.meta 2024-12-07T18:19:05,387 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:42557:42557)] 2024-12-07T18:19:05,387 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:19:05,389 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T18:19:05,461 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T18:19:05,466 INFO [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-07T18:19:05,470 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T18:19:05,471 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:19:05,471 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-07T18:19:05,471 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-07T18:19:05,474 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T18:19:05,476 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T18:19:05,476 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:05,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T18:19:05,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T18:19:05,479 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T18:19:05,480 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:05,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T18:19:05,481 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T18:19:05,482 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T18:19:05,482 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:05,483 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T18:19:05,485 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740 2024-12-07T18:19:05,487 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740 2024-12-07T18:19:05,490 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T18:19:05,493 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-07T18:19:05,495 INFO [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60057120, jitterRate=-0.10507917404174805}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T18:19:05,497 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-07T18:19:05,505 INFO [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733595545335 2024-12-07T18:19:05,516 DEBUG [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T18:19:05,517 INFO [RS_OPEN_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-07T18:19:05,518 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:05,520 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8a7a030b35db,45237,1733595542335, state=OPEN 2024-12-07T18:19:05,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T18:19:05,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T18:19:05,526 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T18:19:05,526 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T18:19:05,531 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T18:19:05,531 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=8a7a030b35db,45237,1733595542335 in 360 msec 2024-12-07T18:19:05,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T18:19:05,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 549 msec 2024-12-07T18:19:05,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5310 sec 2024-12-07T18:19:05,543 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733595545543, completionTime=-1 2024-12-07T18:19:05,543 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T18:19:05,543 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-07T18:19:05,581 DEBUG [hconnection-0x72e0059f-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:05,584 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52074, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:05,594 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-07T18:19:05,594 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733595605594 2024-12-07T18:19:05,595 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733595665595 2024-12-07T18:19:05,595 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 51 msec 2024-12-07T18:19:05,631 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8a7a030b35db,35545,1733595541544-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:05,631 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8a7a030b35db,35545,1733595541544-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:05,631 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8a7a030b35db,35545,1733595541544-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:05,633 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-8a7a030b35db:35545, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:05,634 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T18:19:05,639 DEBUG [master/8a7a030b35db:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-07T18:19:05,644 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
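The FlushLargeStoresPolicy entry a few lines above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta descriptor, so the per-family flush lower bound falls back to the region memstore flush size divided by the number of families (16.0 M here). For a user table the property can instead be pinned on the table descriptor; the sketch below is illustrative only, with the table name, family name, and 32 MB value all invented for the example:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // Property name taken from the log entry above; the 32 MB value is an arbitrary example.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("my_table"))
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(32L * 1024 * 1024))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .build();
            System.out.println(td); // the descriptor now carries the explicit lower bound
        }
    }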
2024-12-07T18:19:05,646 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T18:19:05,653 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-07T18:19:05,656 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T18:19:05,657 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:05,659 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T18:19:05,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741835_1011 (size=358) 2024-12-07T18:19:06,076 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e84b61442b688dc7f09be574fc7d8389, NAME => 'hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7 2024-12-07T18:19:06,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741836_1012 (size=42) 2024-12-07T18:19:06,492 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:19:06,492 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing e84b61442b688dc7f09be574fc7d8389, disabling compactions & flushes 2024-12-07T18:19:06,492 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 2024-12-07T18:19:06,492 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 2024-12-07T18:19:06,493 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 
after waiting 0 ms 2024-12-07T18:19:06,493 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 2024-12-07T18:19:06,493 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 2024-12-07T18:19:06,493 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for e84b61442b688dc7f09be574fc7d8389: 2024-12-07T18:19:06,495 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T18:19:06,502 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733595546497"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733595546497"}]},"ts":"1733595546497"} 2024-12-07T18:19:06,527 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-07T18:19:06,529 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T18:19:06,532 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595546530"}]},"ts":"1733595546530"} 2024-12-07T18:19:06,537 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-07T18:19:06,543 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e84b61442b688dc7f09be574fc7d8389, ASSIGN}] 2024-12-07T18:19:06,546 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e84b61442b688dc7f09be574fc7d8389, ASSIGN 2024-12-07T18:19:06,547 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=e84b61442b688dc7f09be574fc7d8389, ASSIGN; state=OFFLINE, location=8a7a030b35db,45237,1733595542335; forceNewPlan=false, retain=false 2024-12-07T18:19:06,698 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e84b61442b688dc7f09be574fc7d8389, regionState=OPENING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:06,702 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure e84b61442b688dc7f09be574fc7d8389, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:19:06,856 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:06,862 INFO [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 2024-12-07T18:19:06,863 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => e84b61442b688dc7f09be574fc7d8389, NAME => 'hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389.', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:19:06,863 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace e84b61442b688dc7f09be574fc7d8389 2024-12-07T18:19:06,863 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:19:06,864 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for e84b61442b688dc7f09be574fc7d8389 2024-12-07T18:19:06,864 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for e84b61442b688dc7f09be574fc7d8389 2024-12-07T18:19:06,866 INFO [StoreOpener-e84b61442b688dc7f09be574fc7d8389-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e84b61442b688dc7f09be574fc7d8389 2024-12-07T18:19:06,868 INFO [StoreOpener-e84b61442b688dc7f09be574fc7d8389-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e84b61442b688dc7f09be574fc7d8389 columnFamilyName info 2024-12-07T18:19:06,868 DEBUG [StoreOpener-e84b61442b688dc7f09be574fc7d8389-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:06,869 INFO [StoreOpener-e84b61442b688dc7f09be574fc7d8389-1 {}] regionserver.HStore(327): Store=e84b61442b688dc7f09be574fc7d8389/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:06,870 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/namespace/e84b61442b688dc7f09be574fc7d8389 2024-12-07T18:19:06,871 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/namespace/e84b61442b688dc7f09be574fc7d8389 2024-12-07T18:19:06,875 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for e84b61442b688dc7f09be574fc7d8389 2024-12-07T18:19:06,879 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/namespace/e84b61442b688dc7f09be574fc7d8389/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T18:19:06,881 INFO [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened e84b61442b688dc7f09be574fc7d8389; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61577801, jitterRate=-0.0824192613363266}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T18:19:06,882 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for e84b61442b688dc7f09be574fc7d8389: 2024-12-07T18:19:06,885 INFO [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389., pid=6, masterSystemTime=1733595546856 2024-12-07T18:19:06,888 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 2024-12-07T18:19:06,888 INFO [RS_OPEN_PRIORITY_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 
2024-12-07T18:19:06,889 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e84b61442b688dc7f09be574fc7d8389, regionState=OPEN, openSeqNum=2, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:06,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T18:19:06,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure e84b61442b688dc7f09be574fc7d8389, server=8a7a030b35db,45237,1733595542335 in 190 msec 2024-12-07T18:19:06,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T18:19:06,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=e84b61442b688dc7f09be574fc7d8389, ASSIGN in 353 msec 2024-12-07T18:19:06,902 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T18:19:06,902 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595546902"}]},"ts":"1733595546902"} 2024-12-07T18:19:06,905 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-07T18:19:06,909 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T18:19:06,912 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2630 sec 2024-12-07T18:19:06,957 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-07T18:19:06,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-07T18:19:06,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:06,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:19:06,991 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-07T18:19:07,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-07T18:19:07,014 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 28 msec 2024-12-07T18:19:07,026 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-07T18:19:07,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-07T18:19:07,042 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-12-07T18:19:07,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-07T18:19:07,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-07T18:19:07,056 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.625sec 2024-12-07T18:19:07,058 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T18:19:07,059 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T18:19:07,060 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T18:19:07,061 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T18:19:07,061 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T18:19:07,062 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8a7a030b35db,35545,1733595541544-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T18:19:07,062 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8a7a030b35db,35545,1733595541544-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T18:19:07,068 DEBUG [master/8a7a030b35db:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-07T18:19:07,069 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T18:19:07,070 INFO [master/8a7a030b35db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8a7a030b35db,35545,1733595541544-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
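The two CreateNamespaceProcedure entries just above (namespace=default in 28 msec, namespace=hbase in 15 msec) are the master bootstrapping its built-in namespaces once the namespace table is online. For reference, the same machinery is reachable from a client through the Admin API; a minimal sketch, assuming an hbase-site.xml on the classpath that points at this cluster and a hypothetical namespace name test_ns:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // After bootstrap the cluster should list at least "default" and "hbase".
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println("namespace: " + ns.getName());
                }
                // An explicit create goes through the same CreateNamespaceProcedure seen in the log.
                admin.createNamespace(NamespaceDescriptor.create("test_ns").build());
            }
        }
    }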
2024-12-07T18:19:07,112 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0870ca2a to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63dfbe60 2024-12-07T18:19:07,113 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-07T18:19:07,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@430e71de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:07,126 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-07T18:19:07,126 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-07T18:19:07,137 DEBUG [hconnection-0x6449c4c6-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:07,147 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52086, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:07,155 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=8a7a030b35db,35545,1733595541544 2024-12-07T18:19:07,205 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=216, ProcessCount=11, AvailableMemoryMB=7316 2024-12-07T18:19:07,224 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T18:19:07,227 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58746, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T18:19:07,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
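The WARN from client.ZKConnectionRegistry above notes that the ZooKeeper-based client registry is deprecated and points at the RPC-based registry described in the referenced book section. A sketch of switching a client over follows; the property names hbase.client.registry.impl and hbase.client.bootstrap.servers are assumptions recalled from that documentation rather than anything confirmed by this log, and the endpoint is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RpcRegistrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property names; see the book link in the WARN above for the authoritative ones.
            conf.set("hbase.client.registry.impl",
                     "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
            conf.set("hbase.client.bootstrap.servers", "master-host:16000"); // hypothetical endpoint
            try (Connection conn = ConnectionFactory.createConnection(conf)) {
                System.out.println("connected without a client-side ZooKeeper quorum: " + conn);
            }
        }
    }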
2024-12-07T18:19:07,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T18:19:07,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-07T18:19:07,244 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T18:19:07,245 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:07,247 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-07T18:19:07,247 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T18:19:07,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T18:19:07,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741837_1013 (size=963) 2024-12-07T18:19:07,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T18:19:07,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T18:19:07,664 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7 2024-12-07T18:19:07,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741838_1014 (size=53) 2024-12-07T18:19:07,687 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:19:07,687 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a05999984107cee49bb0b7292dd34cbb, disabling compactions & flushes 2024-12-07T18:19:07,687 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:07,687 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:07,687 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. after waiting 0 ms 2024-12-07T18:19:07,687 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:07,687 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:07,687 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:07,690 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T18:19:07,690 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733595547690"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733595547690"}]},"ts":"1733595547690"} 2024-12-07T18:19:07,695 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
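The create request logged at 18:19:07,239 (three column families A, B and C with VERSIONS => '1' and BLOCKSIZE => 65536, plus the table-level attribute hbase.hregion.compacting.memstore.type => 'ADAPTIVE') corresponds roughly to the Admin call sketched below. This is a reconstruction from the descriptor as printed in the log, not the test's actual code; connection setup is assumed to come from an hbase-site.xml on the classpath:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestAcidGuarantees {
        public static void main(String[] args) throws Exception {
            TableDescriptorBuilder tdb = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level attribute shown in the log's TABLE_ATTRIBUTES => METADATA block.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)   // VERSIONS => '1'
                    .setBlocksize(65536) // BLOCKSIZE => '65536 B (64KB)'
                    .build());
            }
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(tdb.build()); // drives the CreateTableProcedure (pid=9) seen above
            }
        }
    }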
2024-12-07T18:19:07,697 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T18:19:07,697 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595547697"}]},"ts":"1733595547697"} 2024-12-07T18:19:07,702 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-07T18:19:07,707 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a05999984107cee49bb0b7292dd34cbb, ASSIGN}] 2024-12-07T18:19:07,711 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a05999984107cee49bb0b7292dd34cbb, ASSIGN 2024-12-07T18:19:07,713 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a05999984107cee49bb0b7292dd34cbb, ASSIGN; state=OFFLINE, location=8a7a030b35db,45237,1733595542335; forceNewPlan=false, retain=false 2024-12-07T18:19:07,863 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=a05999984107cee49bb0b7292dd34cbb, regionState=OPENING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:07,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T18:19:07,866 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:19:08,020 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,026 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:08,027 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:19:08,027 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:08,027 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:19:08,028 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:08,028 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:08,030 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:08,033 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:19:08,034 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a05999984107cee49bb0b7292dd34cbb columnFamilyName A 2024-12-07T18:19:08,034 DEBUG [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:08,035 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] regionserver.HStore(327): Store=a05999984107cee49bb0b7292dd34cbb/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:08,035 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:08,037 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:19:08,037 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a05999984107cee49bb0b7292dd34cbb columnFamilyName B 2024-12-07T18:19:08,037 DEBUG [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:08,038 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] regionserver.HStore(327): Store=a05999984107cee49bb0b7292dd34cbb/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:08,038 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:08,040 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:19:08,041 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a05999984107cee49bb0b7292dd34cbb columnFamilyName C 2024-12-07T18:19:08,041 DEBUG [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:08,042 INFO [StoreOpener-a05999984107cee49bb0b7292dd34cbb-1 {}] regionserver.HStore(327): Store=a05999984107cee49bb0b7292dd34cbb/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:08,042 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:08,043 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:08,044 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:08,050 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T18:19:08,052 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:08,056 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T18:19:08,057 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened a05999984107cee49bb0b7292dd34cbb; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69431294, jitterRate=0.03460690379142761}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T18:19:08,057 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:08,059 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., pid=11, masterSystemTime=1733595548020 2024-12-07T18:19:08,062 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:08,062 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
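The store-opener entries above show each of the three families backed by a CompactingMemStore with compactor=ADAPTIVE, picked up from the table-level hbase.hregion.compacting.memstore.type attribute set at create time. The same policy can also be pinned on an individual column family descriptor; a short sketch, with the family name A taken from the log and everything else assumed:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveFamilySketch {
        public static void main(String[] args) {
            // Per-family in-memory compaction setting, independent of the table-level attribute.
            ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build();
            System.out.println(cf);
        }
    }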
2024-12-07T18:19:08,063 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=a05999984107cee49bb0b7292dd34cbb, regionState=OPEN, openSeqNum=2, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,070 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-07T18:19:08,070 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 in 200 msec 2024-12-07T18:19:08,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-07T18:19:08,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a05999984107cee49bb0b7292dd34cbb, ASSIGN in 363 msec 2024-12-07T18:19:08,075 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T18:19:08,075 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595548075"}]},"ts":"1733595548075"} 2024-12-07T18:19:08,078 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-07T18:19:08,081 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T18:19:08,084 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 843 msec 2024-12-07T18:19:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T18:19:08,368 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-07T18:19:08,372 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x02a08c5a to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6be4168e 2024-12-07T18:19:08,376 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ed9b166, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:08,379 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:08,381 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:08,385 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T18:19:08,387 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58754, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T18:19:08,394 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x24512372 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5038857 2024-12-07T18:19:08,397 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27c80704, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:08,399 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53623ce6 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4aba57ed 2024-12-07T18:19:08,402 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f68aae6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:08,404 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x66d523ff to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@629b91f8 2024-12-07T18:19:08,407 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18de28d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:08,408 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c9b2c1d to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62b16227 2024-12-07T18:19:08,411 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cb8ce8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:08,412 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d888e3e to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53f30e40 2024-12-07T18:19:08,415 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7915562a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:08,417 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f34c0b8 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@691cbc80 2024-12-07T18:19:08,420 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502730d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:08,421 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6ebb9f30 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62cfc6db 2024-12-07T18:19:08,424 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b8793a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:08,425 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f18a09d to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8b52656 2024-12-07T18:19:08,430 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71209fad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:08,431 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x426bcd11 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@634dc49c 2024-12-07T18:19:08,435 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1665e2af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:08,442 DEBUG [hconnection-0x7c0bf2ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:08,444 DEBUG [hconnection-0x63fd773c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:08,445 DEBUG [hconnection-0x2717408a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:08,445 DEBUG [hconnection-0x1956867-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:08,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:08,450 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52100, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:08,450 DEBUG [hconnection-0x1f74a850-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:08,451 DEBUG [hconnection-0x4872f2f9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:08,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-07T18:19:08,454 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52114, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:08,454 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52124, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:08,455 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:08,456 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52130, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:08,457 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:08,459 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:08,463 DEBUG [hconnection-0x293c9f96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:08,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T18:19:08,468 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52146, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:08,470 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:08,473 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52170, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:08,473 DEBUG [hconnection-0x372f53f1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:08,482 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52176, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:08,492 DEBUG [hconnection-0xfe98f8b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:08,505 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:08,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:08,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-07T18:19:08,570 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T18:19:08,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:08,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:08,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:08,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:08,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:08,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:08,623 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-07T18:19:08,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:08,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:08,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:08,631 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:08,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:08,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:08,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:08,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595608688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:08,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595608692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:08,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595608695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:08,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595608699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:08,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595608704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/8d3c2f3253d441f19f7a1ab4aed2a24d is 50, key is test_row_0/A:col10/1733595548552/Put/seqid=0 2024-12-07T18:19:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T18:19:08,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741839_1015 (size=12001) 2024-12-07T18:19:08,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595608811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595608812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595608813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:08,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595608811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:08,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595608819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,830 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-07T18:19:08,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:08,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:08,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:08,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:08,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:08,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:08,991 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:08,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-07T18:19:09,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:09,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:09,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:09,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595609020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595609023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595609023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595609024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595609028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T18:19:09,166 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-07T18:19:09,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:09,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:09,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:09,167 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/8d3c2f3253d441f19f7a1ab4aed2a24d 2024-12-07T18:19:09,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/28504d646c6748738b71ce4afa8de095 is 50, key is test_row_0/B:col10/1733595548552/Put/seqid=0 2024-12-07T18:19:09,322 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-07T18:19:09,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:09,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:09,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:09,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595609326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595609331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595609331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595609333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595609338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741840_1016 (size=12001) 2024-12-07T18:19:09,367 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/28504d646c6748738b71ce4afa8de095 2024-12-07T18:19:09,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/a806839f192f41b5b719b21156ad470a is 50, key is test_row_0/C:col10/1733595548552/Put/seqid=0 2024-12-07T18:19:09,479 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-07T18:19:09,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:09,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
as already flushing 2024-12-07T18:19:09,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:09,480 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:09,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741841_1017 (size=12001) 2024-12-07T18:19:09,489 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/a806839f192f41b5b719b21156ad470a 2024-12-07T18:19:09,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/8d3c2f3253d441f19f7a1ab4aed2a24d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8d3c2f3253d441f19f7a1ab4aed2a24d 2024-12-07T18:19:09,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8d3c2f3253d441f19f7a1ab4aed2a24d, entries=150, sequenceid=16, filesize=11.7 K 2024-12-07T18:19:09,519 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-07T18:19:09,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/28504d646c6748738b71ce4afa8de095 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/28504d646c6748738b71ce4afa8de095 2024-12-07T18:19:09,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/28504d646c6748738b71ce4afa8de095, entries=150, sequenceid=16, filesize=11.7 K 2024-12-07T18:19:09,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/a806839f192f41b5b719b21156ad470a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/a806839f192f41b5b719b21156ad470a 2024-12-07T18:19:09,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/a806839f192f41b5b719b21156ad470a, entries=150, sequenceid=16, filesize=11.7 K 2024-12-07T18:19:09,555 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for a05999984107cee49bb0b7292dd34cbb in 994ms, sequenceid=16, compaction requested=false 2024-12-07T18:19:09,557 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-07T18:19:09,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:09,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T18:19:09,633 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-07T18:19:09,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:09,635 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-07T18:19:09,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:09,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:09,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:09,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:09,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:09,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:09,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/17799a91678a4d7faa4380e94c747c36 is 50, key is test_row_0/A:col10/1733595548690/Put/seqid=0 2024-12-07T18:19:09,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741842_1018 (size=12001) 2024-12-07T18:19:09,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:09,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:09,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595609878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595609900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595609895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595609921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:09,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:09,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595609926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595610026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595610028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595610033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595610033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595610029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,145 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/17799a91678a4d7faa4380e94c747c36 2024-12-07T18:19:10,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/5a5071e9f2014e02a0503ee2ef627549 is 50, key is test_row_0/B:col10/1733595548690/Put/seqid=0 2024-12-07T18:19:10,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741843_1019 (size=12001) 2024-12-07T18:19:10,233 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/5a5071e9f2014e02a0503ee2ef627549 2024-12-07T18:19:10,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595610253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,257 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T18:19:10,259 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-07T18:19:10,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595610254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595610255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595610256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595610257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/ea707277c19644acbec4f4508ded7b9e is 50, key is test_row_0/C:col10/1733595548690/Put/seqid=0 2024-12-07T18:19:10,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741844_1020 (size=12001) 2024-12-07T18:19:10,346 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/ea707277c19644acbec4f4508ded7b9e 2024-12-07T18:19:10,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/17799a91678a4d7faa4380e94c747c36 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/17799a91678a4d7faa4380e94c747c36 2024-12-07T18:19:10,389 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/17799a91678a4d7faa4380e94c747c36, entries=150, sequenceid=37, filesize=11.7 K 2024-12-07T18:19:10,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/5a5071e9f2014e02a0503ee2ef627549 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/5a5071e9f2014e02a0503ee2ef627549 2024-12-07T18:19:10,403 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 
{event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/5a5071e9f2014e02a0503ee2ef627549, entries=150, sequenceid=37, filesize=11.7 K 2024-12-07T18:19:10,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/ea707277c19644acbec4f4508ded7b9e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ea707277c19644acbec4f4508ded7b9e 2024-12-07T18:19:10,424 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ea707277c19644acbec4f4508ded7b9e, entries=150, sequenceid=37, filesize=11.7 K 2024-12-07T18:19:10,427 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for a05999984107cee49bb0b7292dd34cbb in 793ms, sequenceid=37, compaction requested=false 2024-12-07T18:19:10,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:10,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:10,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-07T18:19:10,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-07T18:19:10,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-07T18:19:10,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9710 sec 2024-12-07T18:19:10,445 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.9880 sec 2024-12-07T18:19:10,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:10,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-07T18:19:10,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:10,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:10,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:10,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:10,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:10,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:10,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T18:19:10,581 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-07T18:19:10,583 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:10,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/fd0ff6e6704b46faae57e674140727ab is 50, key is test_row_0/A:col10/1733595550560/Put/seqid=0 2024-12-07T18:19:10,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-07T18:19:10,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-07T18:19:10,593 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:10,594 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:10,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:10,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741845_1021 (size=14341) 2024-12-07T18:19:10,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/fd0ff6e6704b46faae57e674140727ab 2024-12-07T18:19:10,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595610611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595610618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595610614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595610619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595610624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/fa35c78b0168499489150428c1c35b94 is 50, key is test_row_0/B:col10/1733595550560/Put/seqid=0 2024-12-07T18:19:10,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741846_1022 (size=12001) 2024-12-07T18:19:10,688 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/fa35c78b0168499489150428c1c35b94 2024-12-07T18:19:10,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-07T18:19:10,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/9aee3f1ebea642fe87f51cb142d011c9 is 50, key is test_row_0/C:col10/1733595550560/Put/seqid=0 2024-12-07T18:19:10,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595610727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595610729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595610729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595610729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:10,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595610735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,751 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-07T18:19:10,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:10,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:10,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:10,752 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:10,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:10,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:10,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741847_1023 (size=12001) 2024-12-07T18:19:10,766 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/9aee3f1ebea642fe87f51cb142d011c9 2024-12-07T18:19:10,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/fd0ff6e6704b46faae57e674140727ab as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/fd0ff6e6704b46faae57e674140727ab 2024-12-07T18:19:10,803 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/fd0ff6e6704b46faae57e674140727ab, entries=200, sequenceid=54, filesize=14.0 K 2024-12-07T18:19:10,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/fa35c78b0168499489150428c1c35b94 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fa35c78b0168499489150428c1c35b94 2024-12-07T18:19:10,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fa35c78b0168499489150428c1c35b94, entries=150, sequenceid=54, filesize=11.7 K 2024-12-07T18:19:10,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/9aee3f1ebea642fe87f51cb142d011c9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9aee3f1ebea642fe87f51cb142d011c9 2024-12-07T18:19:10,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9aee3f1ebea642fe87f51cb142d011c9, entries=150, sequenceid=54, filesize=11.7 K 2024-12-07T18:19:10,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for a05999984107cee49bb0b7292dd34cbb in 270ms, sequenceid=54, compaction requested=true 2024-12-07T18:19:10,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:10,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:10,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:10,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:10,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:10,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:10,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:10,840 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:10,841 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:10,844 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:10,846 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/B is initiating minor compaction 
(all files) 2024-12-07T18:19:10,846 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/B in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:10,846 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/28504d646c6748738b71ce4afa8de095, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/5a5071e9f2014e02a0503ee2ef627549, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fa35c78b0168499489150428c1c35b94] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=35.2 K 2024-12-07T18:19:10,849 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 28504d646c6748738b71ce4afa8de095, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733595548470 2024-12-07T18:19:10,850 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a5071e9f2014e02a0503ee2ef627549, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733595548632 2024-12-07T18:19:10,851 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting fa35c78b0168499489150428c1c35b94, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733595549890 2024-12-07T18:19:10,854 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:10,854 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/A is initiating minor compaction (all files) 2024-12-07T18:19:10,854 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/A in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:10,855 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8d3c2f3253d441f19f7a1ab4aed2a24d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/17799a91678a4d7faa4380e94c747c36, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/fd0ff6e6704b46faae57e674140727ab] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=37.4 K 2024-12-07T18:19:10,856 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d3c2f3253d441f19f7a1ab4aed2a24d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733595548470 2024-12-07T18:19:10,857 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17799a91678a4d7faa4380e94c747c36, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733595548632 2024-12-07T18:19:10,858 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd0ff6e6704b46faae57e674140727ab, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733595549890 2024-12-07T18:19:10,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-07T18:19:10,906 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:10,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-07T18:19:10,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:10,908 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-07T18:19:10,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:10,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:10,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:10,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:10,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:10,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:10,923 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#B#compaction#9 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:10,924 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/47df46474bda45d5b081f3c9866381c6 is 50, key is test_row_0/B:col10/1733595550560/Put/seqid=0 2024-12-07T18:19:10,931 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#A#compaction#10 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:10,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1f5661ce673b4158a064288759e8490a is 50, key is test_row_0/A:col10/1733595550619/Put/seqid=0 2024-12-07T18:19:10,935 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/b0044be1e3f247279249754cf1ff1006 is 50, key is test_row_0/A:col10/1733595550560/Put/seqid=0 2024-12-07T18:19:10,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
as already flushing 2024-12-07T18:19:10,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:10,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741848_1024 (size=12104) 2024-12-07T18:19:10,983 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T18:19:10,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741849_1025 (size=12001) 2024-12-07T18:19:10,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741850_1026 (size=12104) 2024-12-07T18:19:10,999 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1f5661ce673b4158a064288759e8490a 2024-12-07T18:19:11,018 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/47df46474bda45d5b081f3c9866381c6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/47df46474bda45d5b081f3c9866381c6 2024-12-07T18:19:11,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/2267264741db46c993bce90cb862f75b is 50, key is test_row_0/B:col10/1733595550619/Put/seqid=0 2024-12-07T18:19:11,079 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/B of a05999984107cee49bb0b7292dd34cbb into 47df46474bda45d5b081f3c9866381c6(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:11,080 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:11,080 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/B, priority=13, startTime=1733595550839; duration=0sec 2024-12-07T18:19:11,080 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:11,080 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:11,080 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:11,084 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:11,084 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/C is initiating minor compaction (all files) 2024-12-07T18:19:11,084 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/C in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:11,085 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/a806839f192f41b5b719b21156ad470a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ea707277c19644acbec4f4508ded7b9e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9aee3f1ebea642fe87f51cb142d011c9] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=35.2 K 2024-12-07T18:19:11,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741851_1027 (size=12001) 2024-12-07T18:19:11,086 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a806839f192f41b5b719b21156ad470a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733595548470 2024-12-07T18:19:11,088 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/2267264741db46c993bce90cb862f75b 2024-12-07T18:19:11,088 DEBUG 
[RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ea707277c19644acbec4f4508ded7b9e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733595548632 2024-12-07T18:19:11,091 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 9aee3f1ebea642fe87f51cb142d011c9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733595549890 2024-12-07T18:19:11,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/b700d8ef54714a9e9b5cb7571eec42d9 is 50, key is test_row_0/C:col10/1733595550619/Put/seqid=0 2024-12-07T18:19:11,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595611114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,139 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#C#compaction#14 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:11,140 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fc6140cb58ba41e7b3bb17f9c2b88ce2 is 50, key is test_row_0/C:col10/1733595550560/Put/seqid=0 2024-12-07T18:19:11,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595611109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595611117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595611121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595611130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741852_1028 (size=12001) 2024-12-07T18:19:11,181 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/b700d8ef54714a9e9b5cb7571eec42d9 2024-12-07T18:19:11,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741853_1029 (size=12104) 2024-12-07T18:19:11,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-07T18:19:11,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1f5661ce673b4158a064288759e8490a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1f5661ce673b4158a064288759e8490a 2024-12-07T18:19:11,215 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1f5661ce673b4158a064288759e8490a, entries=150, sequenceid=73, filesize=11.7 K 2024-12-07T18:19:11,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/2267264741db46c993bce90cb862f75b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2267264741db46c993bce90cb862f75b 2024-12-07T18:19:11,224 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fc6140cb58ba41e7b3bb17f9c2b88ce2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fc6140cb58ba41e7b3bb17f9c2b88ce2 2024-12-07T18:19:11,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,236 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2267264741db46c993bce90cb862f75b, entries=150, sequenceid=73, filesize=11.7 K 2024-12-07T18:19:11,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595611233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,237 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/C of a05999984107cee49bb0b7292dd34cbb into fc6140cb58ba41e7b3bb17f9c2b88ce2(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:11,238 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:11,238 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/C, priority=13, startTime=1733595550840; duration=0sec 2024-12-07T18:19:11,239 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:11,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/b700d8ef54714a9e9b5cb7571eec42d9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/b700d8ef54714a9e9b5cb7571eec42d9 2024-12-07T18:19:11,239 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:11,252 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/b700d8ef54714a9e9b5cb7571eec42d9, entries=150, sequenceid=73, filesize=11.7 K 2024-12-07T18:19:11,255 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for a05999984107cee49bb0b7292dd34cbb in 347ms, sequenceid=73, compaction requested=false 2024-12-07T18:19:11,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:11,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:11,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-07T18:19:11,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-07T18:19:11,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-07T18:19:11,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 666 msec 2024-12-07T18:19:11,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-07T18:19:11,270 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 683 msec 2024-12-07T18:19:11,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:11,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:11,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:11,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:11,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:11,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:11,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:11,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/720f3d6a7b5b4c68b8d8250e301808fe is 50, key is test_row_0/A:col10/1733595551116/Put/seqid=0 2024-12-07T18:19:11,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741854_1030 (size=14341) 2024-12-07T18:19:11,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/720f3d6a7b5b4c68b8d8250e301808fe 2024-12-07T18:19:11,332 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/77e225aa5dd845faa322463fbbb08983 is 50, key is test_row_0/B:col10/1733595551116/Put/seqid=0 2024-12-07T18:19:11,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595611318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595611353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595611357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595611365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741855_1031 (size=12001) 2024-12-07T18:19:11,409 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/b0044be1e3f247279249754cf1ff1006 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b0044be1e3f247279249754cf1ff1006 2024-12-07T18:19:11,422 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/A of a05999984107cee49bb0b7292dd34cbb into b0044be1e3f247279249754cf1ff1006(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:11,422 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:11,422 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/A, priority=13, startTime=1733595550836; duration=0sec 2024-12-07T18:19:11,422 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:11,422 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:11,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595611439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595611459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595611469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595611470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595611476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595611667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595611676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595611676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595611681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-07T18:19:11,700 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-07T18:19:11,705 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:11,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-07T18:19:11,716 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:11,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-07T18:19:11,719 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:11,720 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:11,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:11,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595611748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,811 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/77e225aa5dd845faa322463fbbb08983 2024-12-07T18:19:11,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-07T18:19:11,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/69fcc5ee25c3444ea73fb043d9a96ca5 is 50, key is test_row_0/C:col10/1733595551116/Put/seqid=0 2024-12-07T18:19:11,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741856_1032 (size=12001) 2024-12-07T18:19:11,875 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/69fcc5ee25c3444ea73fb043d9a96ca5 2024-12-07T18:19:11,876 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:11,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:11,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:11,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:11,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:11,877 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:11,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:11,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:11,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/720f3d6a7b5b4c68b8d8250e301808fe as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/720f3d6a7b5b4c68b8d8250e301808fe 2024-12-07T18:19:11,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/720f3d6a7b5b4c68b8d8250e301808fe, entries=200, sequenceid=97, filesize=14.0 K 2024-12-07T18:19:11,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/77e225aa5dd845faa322463fbbb08983 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77e225aa5dd845faa322463fbbb08983 2024-12-07T18:19:11,922 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77e225aa5dd845faa322463fbbb08983, entries=150, sequenceid=97, filesize=11.7 K 2024-12-07T18:19:11,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/69fcc5ee25c3444ea73fb043d9a96ca5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/69fcc5ee25c3444ea73fb043d9a96ca5 2024-12-07T18:19:11,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/69fcc5ee25c3444ea73fb043d9a96ca5, entries=150, sequenceid=97, filesize=11.7 K 2024-12-07T18:19:11,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for a05999984107cee49bb0b7292dd34cbb in 669ms, sequenceid=97, compaction requested=true 2024-12-07T18:19:11,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:11,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:11,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:11,937 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:11,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark 
for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:11,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:11,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:11,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:11,938 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:11,939 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:11,940 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/A is initiating minor compaction (all files) 2024-12-07T18:19:11,940 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/A in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:11,940 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b0044be1e3f247279249754cf1ff1006, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1f5661ce673b4158a064288759e8490a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/720f3d6a7b5b4c68b8d8250e301808fe] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=37.5 K 2024-12-07T18:19:11,941 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:11,941 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/B is initiating minor compaction (all files) 2024-12-07T18:19:11,941 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/B in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:11,941 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/47df46474bda45d5b081f3c9866381c6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2267264741db46c993bce90cb862f75b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77e225aa5dd845faa322463fbbb08983] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=35.3 K 2024-12-07T18:19:11,942 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0044be1e3f247279249754cf1ff1006, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733595549890 2024-12-07T18:19:11,942 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 47df46474bda45d5b081f3c9866381c6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733595549890 2024-12-07T18:19:11,943 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f5661ce673b4158a064288759e8490a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733595550614 2024-12-07T18:19:11,944 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2267264741db46c993bce90cb862f75b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733595550614 2024-12-07T18:19:11,944 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 720f3d6a7b5b4c68b8d8250e301808fe, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1733595551107 2024-12-07T18:19:11,945 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 77e225aa5dd845faa322463fbbb08983, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1733595551116 2024-12-07T18:19:11,969 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#B#compaction#18 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:11,971 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/8fcac339e65149c4aa5ea282f3512087 is 50, key is test_row_0/B:col10/1733595551116/Put/seqid=0 2024-12-07T18:19:11,978 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#A#compaction#19 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:11,979 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/bbcaaf3191654cb19bb842f21754dc6e is 50, key is test_row_0/A:col10/1733595551116/Put/seqid=0 2024-12-07T18:19:11,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:11,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-07T18:19:11,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:11,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:11,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:11,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:11,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:11,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:12,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741858_1034 (size=12207) 2024-12-07T18:19:12,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-07T18:19:12,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741857_1033 (size=12207) 2024-12-07T18:19:12,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595612022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/0f5f75a21b0e446ea61b3ceb505cf04a is 50, key is test_row_0/A:col10/1733595551304/Put/seqid=0 2024-12-07T18:19:12,032 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:12,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:12,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595612025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595612027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595612030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741859_1035 (size=14341) 2024-12-07T18:19:12,050 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/0f5f75a21b0e446ea61b3ceb505cf04a 2024-12-07T18:19:12,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T18:19:12,057 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T18:19:12,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-07T18:19:12,059 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-07T18:19:12,061 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T18:19:12,061 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T18:19:12,061 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T18:19:12,061 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T18:19:12,063 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-07T18:19:12,063 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-07T18:19:12,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/e7cdf315e85747928791fbf8748059f9 is 50, key is test_row_0/B:col10/1733595551304/Put/seqid=0 2024-12-07T18:19:12,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741860_1036 (size=12001) 2024-12-07T18:19:12,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/e7cdf315e85747928791fbf8748059f9 2024-12-07T18:19:12,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/684c0cc8a6b14c558400a055da38c3bf is 50, key is test_row_0/C:col10/1733595551304/Put/seqid=0 2024-12-07T18:19:12,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595612132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595612139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595612139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595612140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741861_1037 (size=12001) 2024-12-07T18:19:12,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/684c0cc8a6b14c558400a055da38c3bf 2024-12-07T18:19:12,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/0f5f75a21b0e446ea61b3ceb505cf04a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0f5f75a21b0e446ea61b3ceb505cf04a 2024-12-07T18:19:12,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0f5f75a21b0e446ea61b3ceb505cf04a, entries=200, sequenceid=114, filesize=14.0 K 2024-12-07T18:19:12,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/e7cdf315e85747928791fbf8748059f9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e7cdf315e85747928791fbf8748059f9 2024-12-07T18:19:12,187 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:12,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:12,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:12,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e7cdf315e85747928791fbf8748059f9, entries=150, sequenceid=114, filesize=11.7 K 2024-12-07T18:19:12,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/684c0cc8a6b14c558400a055da38c3bf as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/684c0cc8a6b14c558400a055da38c3bf 2024-12-07T18:19:12,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/684c0cc8a6b14c558400a055da38c3bf, entries=150, sequenceid=114, filesize=11.7 K 2024-12-07T18:19:12,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for a05999984107cee49bb0b7292dd34cbb in 228ms, sequenceid=114, compaction requested=true 2024-12-07T18:19:12,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:12,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:12,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-07T18:19:12,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:12,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-07T18:19:12,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:12,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-07T18:19:12,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:12,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-07T18:19:12,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:12,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:12,261 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:12,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:12,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:12,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:12,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/c705dce6984e491f8871444bef1b2b1d is 50, key is test_row_0/A:col10/1733595552027/Put/seqid=0 2024-12-07T18:19:12,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-07T18:19:12,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741862_1038 (size=14541) 2024-12-07T18:19:12,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/c705dce6984e491f8871444bef1b2b1d 2024-12-07T18:19:12,343 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:12,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:12,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595612345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595612345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595612346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595612347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595612348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,355 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/f3eb280a4f2f4f679de002204de122e5 is 50, key is test_row_0/B:col10/1733595552027/Put/seqid=0 2024-12-07T18:19:12,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741863_1039 (size=12151) 2024-12-07T18:19:12,402 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/f3eb280a4f2f4f679de002204de122e5 2024-12-07T18:19:12,434 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/bbcaaf3191654cb19bb842f21754dc6e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/bbcaaf3191654cb19bb842f21754dc6e 2024-12-07T18:19:12,446 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/8fcac339e65149c4aa5ea282f3512087 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/8fcac339e65149c4aa5ea282f3512087 2024-12-07T18:19:12,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/20382f47d331483ea73c7091e96271a8 is 50, key is test_row_0/C:col10/1733595552027/Put/seqid=0 2024-12-07T18:19:12,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595612457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,462 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/B of a05999984107cee49bb0b7292dd34cbb into 8fcac339e65149c4aa5ea282f3512087(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:12,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595612457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,465 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:12,466 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/B, priority=13, startTime=1733595551937; duration=0sec 2024-12-07T18:19:12,466 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-07T18:19:12,466 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:12,466 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-12-07T18:19:12,469 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-07T18:19:12,469 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-07T18:19:12,469 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/A of a05999984107cee49bb0b7292dd34cbb into bbcaaf3191654cb19bb842f21754dc6e(size=11.9 K), total size for store is 25.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:12,470 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:12,470 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/A, priority=13, startTime=1733595551937; duration=0sec 2024-12-07T18:19:12,470 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-07T18:19:12,470 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:12,470 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:12,470 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. because compaction request was cancelled 2024-12-07T18:19:12,470 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:12,471 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:12,472 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:12,473 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/C is initiating minor compaction (all files) 2024-12-07T18:19:12,473 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-12-07T18:19:12,473 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/C in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,473 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-07T18:19:12,473 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-07T18:19:12,473 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
because compaction request was cancelled 2024-12-07T18:19:12,473 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:12,473 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fc6140cb58ba41e7b3bb17f9c2b88ce2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/b700d8ef54714a9e9b5cb7571eec42d9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/69fcc5ee25c3444ea73fb043d9a96ca5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/684c0cc8a6b14c558400a055da38c3bf] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=47.0 K 2024-12-07T18:19:12,473 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-07T18:19:12,474 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc6140cb58ba41e7b3bb17f9c2b88ce2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733595549890 2024-12-07T18:19:12,475 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-07T18:19:12,475 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-07T18:19:12,475 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
because compaction request was cancelled 2024-12-07T18:19:12,475 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:12,475 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting b700d8ef54714a9e9b5cb7571eec42d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733595550614 2024-12-07T18:19:12,476 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69fcc5ee25c3444ea73fb043d9a96ca5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1733595551116 2024-12-07T18:19:12,477 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 684c0cc8a6b14c558400a055da38c3bf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733595551304 2024-12-07T18:19:12,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741864_1040 (size=12151) 2024-12-07T18:19:12,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/20382f47d331483ea73c7091e96271a8 2024-12-07T18:19:12,499 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#C#compaction#26 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:12,499 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,500 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/e96f63578e614319be07298676bf0835 is 50, key is test_row_0/C:col10/1733595551304/Put/seqid=0 2024-12-07T18:19:12,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/c705dce6984e491f8871444bef1b2b1d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/c705dce6984e491f8871444bef1b2b1d 2024-12-07T18:19:12,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:12,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:12,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:12,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:12,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:12,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/c705dce6984e491f8871444bef1b2b1d, entries=200, sequenceid=135, filesize=14.2 K 2024-12-07T18:19:12,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/f3eb280a4f2f4f679de002204de122e5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/f3eb280a4f2f4f679de002204de122e5 2024-12-07T18:19:12,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/f3eb280a4f2f4f679de002204de122e5, entries=150, sequenceid=135, filesize=11.9 K 2024-12-07T18:19:12,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/20382f47d331483ea73c7091e96271a8 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/20382f47d331483ea73c7091e96271a8 2024-12-07T18:19:12,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741865_1041 (size=12241) 2024-12-07T18:19:12,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/20382f47d331483ea73c7091e96271a8, entries=150, sequenceid=135, filesize=11.9 K 2024-12-07T18:19:12,550 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/e96f63578e614319be07298676bf0835 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e96f63578e614319be07298676bf0835 2024-12-07T18:19:12,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for a05999984107cee49bb0b7292dd34cbb in 291ms, sequenceid=135, compaction requested=true 2024-12-07T18:19:12,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:12,552 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:12,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:12,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: 
system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:12,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:12,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:12,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:12,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-07T18:19:12,554 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41089 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:12,554 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/A is initiating minor compaction (all files) 2024-12-07T18:19:12,554 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/A in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,555 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/bbcaaf3191654cb19bb842f21754dc6e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0f5f75a21b0e446ea61b3ceb505cf04a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/c705dce6984e491f8871444bef1b2b1d] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=40.1 K 2024-12-07T18:19:12,555 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting bbcaaf3191654cb19bb842f21754dc6e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1733595551116 2024-12-07T18:19:12,557 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f5f75a21b0e446ea61b3ceb505cf04a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733595551304 2024-12-07T18:19:12,558 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting c705dce6984e491f8871444bef1b2b1d, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733595552018 2024-12-07T18:19:12,561 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/C of a05999984107cee49bb0b7292dd34cbb into 
e96f63578e614319be07298676bf0835(size=12.0 K), total size for store is 23.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:12,561 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:12,561 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/C, priority=12, startTime=1733595552209; duration=0sec 2024-12-07T18:19:12,561 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-07T18:19:12,561 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:12,561 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:12,562 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:12,564 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:12,564 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/B is initiating minor compaction (all files) 2024-12-07T18:19:12,564 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/B in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:12,564 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/8fcac339e65149c4aa5ea282f3512087, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e7cdf315e85747928791fbf8748059f9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/f3eb280a4f2f4f679de002204de122e5] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=35.5 K 2024-12-07T18:19:12,565 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fcac339e65149c4aa5ea282f3512087, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1733595551116 2024-12-07T18:19:12,566 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7cdf315e85747928791fbf8748059f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733595551304 2024-12-07T18:19:12,567 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3eb280a4f2f4f679de002204de122e5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733595552024 2024-12-07T18:19:12,584 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#A#compaction#27 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:12,585 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/9bda8bb1c423473daa3769c6649cac0a is 50, key is test_row_0/A:col10/1733595552027/Put/seqid=0 2024-12-07T18:19:12,591 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#B#compaction#28 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:12,592 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/678d63d98437435db2a578a85b9617b0 is 50, key is test_row_0/B:col10/1733595552027/Put/seqid=0 2024-12-07T18:19:12,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741866_1042 (size=12459) 2024-12-07T18:19:12,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741867_1043 (size=12459) 2024-12-07T18:19:12,651 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/9bda8bb1c423473daa3769c6649cac0a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/9bda8bb1c423473daa3769c6649cac0a 2024-12-07T18:19:12,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:12,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-07T18:19:12,669 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:12,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:12,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:12,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:12,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:12,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:12,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:12,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:12,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:12,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:12,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,680 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/A of a05999984107cee49bb0b7292dd34cbb into 9bda8bb1c423473daa3769c6649cac0a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:12,680 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:12,680 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/A, priority=13, startTime=1733595552552; duration=0sec 2024-12-07T18:19:12,680 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:12,680 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:12,680 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-07T18:19:12,681 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/678d63d98437435db2a578a85b9617b0 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/678d63d98437435db2a578a85b9617b0 2024-12-07T18:19:12,681 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-07T18:19:12,682 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-07T18:19:12,682 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. because compaction request was cancelled 2024-12-07T18:19:12,682 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:12,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1200c8d585b141db936e6dd484883757 is 50, key is test_row_0/A:col10/1733595552661/Put/seqid=0 2024-12-07T18:19:12,695 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/B of a05999984107cee49bb0b7292dd34cbb into 678d63d98437435db2a578a85b9617b0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:12,695 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:12,695 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/B, priority=13, startTime=1733595552552; duration=0sec 2024-12-07T18:19:12,695 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:12,695 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:12,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595612712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595612714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595612717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595612720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595612720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741868_1044 (size=12151) 2024-12-07T18:19:12,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-07T18:19:12,825 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:12,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:12,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595612823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595612823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595612827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595612827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:12,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595612828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,982 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:12,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:12,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:12,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:12,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:12,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:12,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595613034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595613035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595613036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595613037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595613037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,137 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:13,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:13,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,139 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:13,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1200c8d585b141db936e6dd484883757 2024-12-07T18:19:13,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/49a9c87e7afc4ddf841f74db505aef25 is 50, key is test_row_0/B:col10/1733595552661/Put/seqid=0 2024-12-07T18:19:13,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741869_1045 (size=12151) 2024-12-07T18:19:13,292 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:13,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:13,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,294 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:13,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595613342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595613347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595613348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595613349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595613349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,448 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:13,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:13,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:13,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,603 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:13,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:13,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,606 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:13,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/49a9c87e7afc4ddf841f74db505aef25 2024-12-07T18:19:13,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/5e85ddd0e20344458c78443630a3aec4 is 50, key is test_row_0/C:col10/1733595552661/Put/seqid=0 2024-12-07T18:19:13,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741870_1046 (size=12151) 2024-12-07T18:19:13,761 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:13,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:13,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:13,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-07T18:19:13,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595613854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595613854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595613856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595613859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:13,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595613865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,915 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:13,916 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:13,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:13,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:13,917 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:13,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:14,070 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:14,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:14,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:14,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:14,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:14,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:14,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:14,144 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/5e85ddd0e20344458c78443630a3aec4 2024-12-07T18:19:14,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1200c8d585b141db936e6dd484883757 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1200c8d585b141db936e6dd484883757 2024-12-07T18:19:14,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1200c8d585b141db936e6dd484883757, entries=150, sequenceid=155, filesize=11.9 K 2024-12-07T18:19:14,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/49a9c87e7afc4ddf841f74db505aef25 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/49a9c87e7afc4ddf841f74db505aef25 2024-12-07T18:19:14,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/49a9c87e7afc4ddf841f74db505aef25, entries=150, sequenceid=155, filesize=11.9 K 2024-12-07T18:19:14,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/5e85ddd0e20344458c78443630a3aec4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/5e85ddd0e20344458c78443630a3aec4 2024-12-07T18:19:14,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/5e85ddd0e20344458c78443630a3aec4, entries=150, sequenceid=155, filesize=11.9 K 2024-12-07T18:19:14,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for a05999984107cee49bb0b7292dd34cbb in 1524ms, sequenceid=155, compaction requested=true 2024-12-07T18:19:14,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:14,190 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-07T18:19:14,192 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files 
of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-07T18:19:14,192 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-07T18:19:14,192 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. because compaction request was cancelled 2024-12-07T18:19:14,192 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:14,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 0 2024-12-07T18:19:14,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:14,192 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-07T18:19:14,194 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-07T18:19:14,194 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-07T18:19:14,194 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
because compaction request was cancelled 2024-12-07T18:19:14,194 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:14,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 0 2024-12-07T18:19:14,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:14,194 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:14,195 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:14,195 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/C is initiating minor compaction (all files) 2024-12-07T18:19:14,196 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/C in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:14,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:14,196 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e96f63578e614319be07298676bf0835, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/20382f47d331483ea73c7091e96271a8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/5e85ddd0e20344458c78443630a3aec4] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=35.7 K 2024-12-07T18:19:14,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:14,196 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting e96f63578e614319be07298676bf0835, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733595551304 2024-12-07T18:19:14,197 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 20382f47d331483ea73c7091e96271a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733595552024 2024-12-07T18:19:14,198 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e85ddd0e20344458c78443630a3aec4, keycount=150, bloomtype=ROW, size=11.9 
K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733595552321 2024-12-07T18:19:14,213 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#C#compaction#32 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:14,214 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/4e5e414b3cd8436bacb8b1fb6dd3cea4 is 50, key is test_row_0/C:col10/1733595552661/Put/seqid=0 2024-12-07T18:19:14,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741871_1047 (size=12493) 2024-12-07T18:19:14,226 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:14,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-07T18:19:14,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:14,227 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-07T18:19:14,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:14,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:14,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:14,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:14,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:14,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:14,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/5b5c51d3dec7475495e5c1e8d972fb4c is 50, key is test_row_0/A:col10/1733595552712/Put/seqid=0 
2024-12-07T18:19:14,248 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/4e5e414b3cd8436bacb8b1fb6dd3cea4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/4e5e414b3cd8436bacb8b1fb6dd3cea4 2024-12-07T18:19:14,265 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/C of a05999984107cee49bb0b7292dd34cbb into 4e5e414b3cd8436bacb8b1fb6dd3cea4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:14,265 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:14,265 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/C, priority=13, startTime=1733595554194; duration=0sec 2024-12-07T18:19:14,266 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:14,266 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:14,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741872_1048 (size=12151) 2024-12-07T18:19:14,272 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/5b5c51d3dec7475495e5c1e8d972fb4c 2024-12-07T18:19:14,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/7a5966e35124418b9aaa1334a453a958 is 50, key is test_row_0/B:col10/1733595552712/Put/seqid=0 2024-12-07T18:19:14,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741873_1049 (size=12151) 2024-12-07T18:19:14,337 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/7a5966e35124418b9aaa1334a453a958 2024-12-07T18:19:14,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/e3354f7298724d5699837416cd3a6938 is 50, key is test_row_0/C:col10/1733595552712/Put/seqid=0 2024-12-07T18:19:14,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741874_1050 (size=12151) 2024-12-07T18:19:14,393 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/e3354f7298724d5699837416cd3a6938 2024-12-07T18:19:14,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/5b5c51d3dec7475495e5c1e8d972fb4c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/5b5c51d3dec7475495e5c1e8d972fb4c 2024-12-07T18:19:14,416 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/5b5c51d3dec7475495e5c1e8d972fb4c, entries=150, sequenceid=176, filesize=11.9 K 2024-12-07T18:19:14,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/7a5966e35124418b9aaa1334a453a958 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/7a5966e35124418b9aaa1334a453a958 2024-12-07T18:19:14,426 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/7a5966e35124418b9aaa1334a453a958, entries=150, sequenceid=176, filesize=11.9 K 2024-12-07T18:19:14,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/e3354f7298724d5699837416cd3a6938 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e3354f7298724d5699837416cd3a6938 2024-12-07T18:19:14,442 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e3354f7298724d5699837416cd3a6938, entries=150, sequenceid=176, filesize=11.9 K 2024-12-07T18:19:14,444 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for a05999984107cee49bb0b7292dd34cbb in 217ms, sequenceid=176, compaction requested=true 2024-12-07T18:19:14,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:14,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:14,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-07T18:19:14,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-07T18:19:14,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-07T18:19:14,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7260 sec 2024-12-07T18:19:14,453 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 2.7450 sec 2024-12-07T18:19:14,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-07T18:19:14,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:14,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:14,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:14,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:14,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:14,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:14,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:14,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/b7952eac8dd24a94b2b2fda2aebddf9f is 50, key is test_row_0/A:col10/1733595554885/Put/seqid=0 2024-12-07T18:19:14,923 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741875_1051 (size=12151) 2024-12-07T18:19:14,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/b7952eac8dd24a94b2b2fda2aebddf9f 2024-12-07T18:19:14,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:14,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595614923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:14,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:14,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595614924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:14,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:14,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595614930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:14,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:14,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595614932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:14,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:14,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595614935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:14,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/97d0ff8c073f4dbfaa19875fa0d259e6 is 50, key is test_row_0/B:col10/1733595554885/Put/seqid=0 2024-12-07T18:19:14,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741876_1052 (size=12151) 2024-12-07T18:19:14,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/97d0ff8c073f4dbfaa19875fa0d259e6 2024-12-07T18:19:14,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/ad7ef3989b3541d9b913afe2271536a6 is 50, key is test_row_0/C:col10/1733595554885/Put/seqid=0 2024-12-07T18:19:15,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741877_1053 (size=12151) 2024-12-07T18:19:15,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/ad7ef3989b3541d9b913afe2271536a6 2024-12-07T18:19:15,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/b7952eac8dd24a94b2b2fda2aebddf9f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b7952eac8dd24a94b2b2fda2aebddf9f 2024-12-07T18:19:15,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595615035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595615035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595615038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,041 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b7952eac8dd24a94b2b2fda2aebddf9f, entries=150, sequenceid=191, filesize=11.9 K 2024-12-07T18:19:15,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/97d0ff8c073f4dbfaa19875fa0d259e6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/97d0ff8c073f4dbfaa19875fa0d259e6 2024-12-07T18:19:15,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595615039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595615043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/97d0ff8c073f4dbfaa19875fa0d259e6, entries=150, sequenceid=191, filesize=11.9 K 2024-12-07T18:19:15,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/ad7ef3989b3541d9b913afe2271536a6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ad7ef3989b3541d9b913afe2271536a6 2024-12-07T18:19:15,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ad7ef3989b3541d9b913afe2271536a6, entries=150, sequenceid=191, filesize=11.9 K 2024-12-07T18:19:15,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for a05999984107cee49bb0b7292dd34cbb in 188ms, sequenceid=191, compaction requested=true 2024-12-07T18:19:15,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:15,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:15,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:15,076 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:15,076 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:15,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 2 
2024-12-07T18:19:15,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:15,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:15,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:15,080 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:15,080 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/A is initiating minor compaction (all files) 2024-12-07T18:19:15,080 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/A in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:15,080 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:15,080 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/B is initiating minor compaction (all files) 2024-12-07T18:19:15,080 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/9bda8bb1c423473daa3769c6649cac0a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1200c8d585b141db936e6dd484883757, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/5b5c51d3dec7475495e5c1e8d972fb4c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b7952eac8dd24a94b2b2fda2aebddf9f] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=47.8 K 2024-12-07T18:19:15,081 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/B in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:15,081 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/678d63d98437435db2a578a85b9617b0, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/49a9c87e7afc4ddf841f74db505aef25, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/7a5966e35124418b9aaa1334a453a958, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/97d0ff8c073f4dbfaa19875fa0d259e6] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=47.8 K 2024-12-07T18:19:15,081 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bda8bb1c423473daa3769c6649cac0a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733595552024 2024-12-07T18:19:15,081 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 678d63d98437435db2a578a85b9617b0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733595552024 2024-12-07T18:19:15,082 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1200c8d585b141db936e6dd484883757, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733595552321 2024-12-07T18:19:15,082 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 49a9c87e7afc4ddf841f74db505aef25, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733595552321 2024-12-07T18:19:15,083 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b5c51d3dec7475495e5c1e8d972fb4c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733595552712 2024-12-07T18:19:15,083 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a5966e35124418b9aaa1334a453a958, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733595552712 2024-12-07T18:19:15,084 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 97d0ff8c073f4dbfaa19875fa0d259e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733595554885 2024-12-07T18:19:15,084 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7952eac8dd24a94b2b2fda2aebddf9f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733595554885 2024-12-07T18:19:15,117 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#B#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:15,118 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/28c7150e91664d4b8caa21702fd6c721 is 50, key is test_row_0/B:col10/1733595554885/Put/seqid=0 2024-12-07T18:19:15,127 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#A#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:15,128 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/712db7fc3dae43028a6183c882d83239 is 50, key is test_row_0/A:col10/1733595554885/Put/seqid=0 2024-12-07T18:19:15,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741878_1054 (size=12595) 2024-12-07T18:19:15,188 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/28c7150e91664d4b8caa21702fd6c721 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/28c7150e91664d4b8caa21702fd6c721 2024-12-07T18:19:15,199 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/B of a05999984107cee49bb0b7292dd34cbb into 28c7150e91664d4b8caa21702fd6c721(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:15,200 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:15,200 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/B, priority=12, startTime=1733595555076; duration=0sec 2024-12-07T18:19:15,200 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:15,200 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:15,201 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:15,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741879_1055 (size=12595) 2024-12-07T18:19:15,206 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:15,206 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/C is initiating minor compaction (all files) 2024-12-07T18:19:15,206 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/C in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:15,207 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/4e5e414b3cd8436bacb8b1fb6dd3cea4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e3354f7298724d5699837416cd3a6938, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ad7ef3989b3541d9b913afe2271536a6] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=35.9 K 2024-12-07T18:19:15,209 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e5e414b3cd8436bacb8b1fb6dd3cea4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733595552321 2024-12-07T18:19:15,212 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting e3354f7298724d5699837416cd3a6938, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733595552712 2024-12-07T18:19:15,213 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/712db7fc3dae43028a6183c882d83239 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/712db7fc3dae43028a6183c882d83239 2024-12-07T18:19:15,215 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ad7ef3989b3541d9b913afe2271536a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733595554885 2024-12-07T18:19:15,223 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/A of a05999984107cee49bb0b7292dd34cbb into 712db7fc3dae43028a6183c882d83239(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:15,223 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:15,223 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/A, priority=12, startTime=1733595555076; duration=0sec 2024-12-07T18:19:15,223 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:15,223 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:15,231 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#C#compaction#41 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:15,232 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/3d454caad9c24b3cb15a173617f9c379 is 50, key is test_row_0/C:col10/1733595554885/Put/seqid=0 2024-12-07T18:19:15,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:15,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:19:15,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741880_1056 (size=12595) 2024-12-07T18:19:15,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:15,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:15,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:15,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:15,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:15,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:15,255 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/3d454caad9c24b3cb15a173617f9c379 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/3d454caad9c24b3cb15a173617f9c379 2024-12-07T18:19:15,257 DEBUG [MemStoreFlusher.0 
{}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/82933d5060444ff3a939c1dd36dab625 is 50, key is test_row_0/A:col10/1733595554920/Put/seqid=0 2024-12-07T18:19:15,269 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/C of a05999984107cee49bb0b7292dd34cbb into 3d454caad9c24b3cb15a173617f9c379(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:15,270 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:15,270 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/C, priority=13, startTime=1733595555077; duration=0sec 2024-12-07T18:19:15,271 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:15,271 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:15,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595615268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595615270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595615272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595615279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595615276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741881_1057 (size=12151) 2024-12-07T18:19:15,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/82933d5060444ff3a939c1dd36dab625 2024-12-07T18:19:15,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/6dd5e97ef7924eaa8021d248e9675381 is 50, key is test_row_0/B:col10/1733595554920/Put/seqid=0 2024-12-07T18:19:15,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741882_1058 (size=12151) 2024-12-07T18:19:15,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595615378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595615378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595615381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595615384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595615385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595615588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595615586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595615593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595615584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595615596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/6dd5e97ef7924eaa8021d248e9675381 2024-12-07T18:19:15,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/643b4938ca8643709aec264d67ccd5ae is 50, key is test_row_0/C:col10/1733595554920/Put/seqid=0 2024-12-07T18:19:15,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-07T18:19:15,827 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-07T18:19:15,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:15,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-07T18:19:15,835 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:15,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-07T18:19:15,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741883_1059 (size=12151) 2024-12-07T18:19:15,837 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:15,837 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:15,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595615895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595615897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595615900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595615902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:15,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595615906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-07T18:19:15,990 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:15,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-07T18:19:15,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:15,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:15,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:15,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:15,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:15,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-07T18:19:16,144 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-07T18:19:16,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:16,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,146 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:16,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/643b4938ca8643709aec264d67ccd5ae 2024-12-07T18:19:16,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/82933d5060444ff3a939c1dd36dab625 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/82933d5060444ff3a939c1dd36dab625 2024-12-07T18:19:16,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/82933d5060444ff3a939c1dd36dab625, entries=150, sequenceid=217, filesize=11.9 K 2024-12-07T18:19:16,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/6dd5e97ef7924eaa8021d248e9675381 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/6dd5e97ef7924eaa8021d248e9675381 2024-12-07T18:19:16,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/6dd5e97ef7924eaa8021d248e9675381, entries=150, sequenceid=217, filesize=11.9 K 2024-12-07T18:19:16,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/643b4938ca8643709aec264d67ccd5ae as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/643b4938ca8643709aec264d67ccd5ae 2024-12-07T18:19:16,298 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-07T18:19:16,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
as already flushing 2024-12-07T18:19:16,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,300 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/643b4938ca8643709aec264d67ccd5ae, entries=150, sequenceid=217, filesize=11.9 K 2024-12-07T18:19:16,303 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for a05999984107cee49bb0b7292dd34cbb in 1060ms, sequenceid=217, compaction requested=false 2024-12-07T18:19:16,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:16,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:16,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-07T18:19:16,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:16,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:16,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:16,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:16,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 
2024-12-07T18:19:16,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:16,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/13b1a89a3b4641a18bdcece741674a43 is 50, key is test_row_0/A:col10/1733595556407/Put/seqid=0 2024-12-07T18:19:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-07T18:19:16,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595616433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595616437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595616442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595616443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595616443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,455 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-07T18:19:16,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:16,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:16,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741884_1060 (size=12151) 2024-12-07T18:19:16,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/13b1a89a3b4641a18bdcece741674a43 2024-12-07T18:19:16,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/774bc9febdfe45418cdd4f3571a70af8 is 50, key is test_row_0/B:col10/1733595556407/Put/seqid=0 2024-12-07T18:19:16,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741885_1061 (size=12151) 2024-12-07T18:19:16,515 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/774bc9febdfe45418cdd4f3571a70af8 2024-12-07T18:19:16,537 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/00e28abc7fed4c0b99803c8ff2aee4e3 is 50, key is test_row_0/C:col10/1733595556407/Put/seqid=0 2024-12-07T18:19:16,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595616545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595616546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595616550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595616550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595616550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741886_1062 (size=12151) 2024-12-07T18:19:16,609 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-07T18:19:16,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:16,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595616749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595616752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595616754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:16,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595616756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595616756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,765 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-07T18:19:16,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:16,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,766 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,920 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:16,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-07T18:19:16,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:16,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:16,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:16,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-07T18:19:16,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/00e28abc7fed4c0b99803c8ff2aee4e3 2024-12-07T18:19:16,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/13b1a89a3b4641a18bdcece741674a43 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/13b1a89a3b4641a18bdcece741674a43 2024-12-07T18:19:16,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/13b1a89a3b4641a18bdcece741674a43, entries=150, sequenceid=233, filesize=11.9 K 2024-12-07T18:19:17,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/774bc9febdfe45418cdd4f3571a70af8 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/774bc9febdfe45418cdd4f3571a70af8 2024-12-07T18:19:17,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/774bc9febdfe45418cdd4f3571a70af8, entries=150, sequenceid=233, filesize=11.9 K 2024-12-07T18:19:17,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/00e28abc7fed4c0b99803c8ff2aee4e3 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/00e28abc7fed4c0b99803c8ff2aee4e3 2024-12-07T18:19:17,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/00e28abc7fed4c0b99803c8ff2aee4e3, entries=150, sequenceid=233, filesize=11.9 K 2024-12-07T18:19:17,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for a05999984107cee49bb0b7292dd34cbb in 611ms, sequenceid=233, compaction requested=true 2024-12-07T18:19:17,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:17,020 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:17,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:17,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:17,020 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:17,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:17,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:17,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:17,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:17,021 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:17,021 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/A is initiating minor 
compaction (all files) 2024-12-07T18:19:17,021 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/A in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:17,021 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/712db7fc3dae43028a6183c882d83239, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/82933d5060444ff3a939c1dd36dab625, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/13b1a89a3b4641a18bdcece741674a43] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=36.0 K 2024-12-07T18:19:17,022 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 712db7fc3dae43028a6183c882d83239, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733595554885 2024-12-07T18:19:17,022 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:17,022 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/B is initiating minor compaction (all files) 2024-12-07T18:19:17,022 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/B in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
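The "Exploring compaction algorithm has selected 3 files of size 36897 ... with 1 in ratio" entries above come from HBase's store-file selection. The following is a minimal, self-contained sketch of the general idea only; it is not the actual org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy code, and the ratio value, min/max file counts, and class name are assumptions made for illustration.

```java
/**
 * Illustrative sketch only -- NOT the HBase ExploringCompactionPolicy source.
 * Scans contiguous windows of store files (oldest to newest), keeps windows
 * whose files are mutually within a size ratio, prefers more files, then a
 * smaller total size.
 */
public class ExploringSelectionSketch {

  /** Returns indices [start, end] of the chosen window, or null if none qualifies. */
  static int[] select(long[] fileSizes, int minFiles, int maxFiles, double ratio) {
    int bestStart = -1, bestEnd = -1;
    long bestSize = Long.MAX_VALUE;
    for (int start = 0; start < fileSizes.length; start++) {
      for (int end = start + minFiles - 1;
           end < fileSizes.length && end - start + 1 <= maxFiles; end++) {
        long total = 0;
        for (int i = start; i <= end; i++) total += fileSizes[i];
        if (!inRatio(fileSizes, start, end, total, ratio)) continue;
        int files = end - start + 1;
        int bestFiles = bestStart < 0 ? -1 : bestEnd - bestStart + 1;
        if (files > bestFiles || (files == bestFiles && total < bestSize)) {
          bestStart = start; bestEnd = end; bestSize = total;
        }
      }
    }
    return bestStart < 0 ? null : new int[] { bestStart, bestEnd };
  }

  /** Every file must be no larger than ratio * (sum of the other files in the window). */
  static boolean inRatio(long[] sizes, int start, int end, long total, double ratio) {
    for (int i = start; i <= end; i++) {
      if (sizes[i] > ratio * (total - sizes[i])) return false;
    }
    return true;
  }

  public static void main(String[] args) {
    // Three files of roughly the sizes seen above (12.3 K + 11.9 K + 11.9 K = 36897 bytes).
    long[] sizes = { 12595, 12151, 12151 };
    int[] window = select(sizes, 3, 10, 1.2);
    System.out.println(window == null ? "no selection"
        : "selected files " + window[0] + ".." + window[1]);
  }
}
```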
2024-12-07T18:19:17,023 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/28c7150e91664d4b8caa21702fd6c721, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/6dd5e97ef7924eaa8021d248e9675381, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/774bc9febdfe45418cdd4f3571a70af8] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=36.0 K 2024-12-07T18:19:17,023 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82933d5060444ff3a939c1dd36dab625, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733595554920 2024-12-07T18:19:17,023 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 28c7150e91664d4b8caa21702fd6c721, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733595554885 2024-12-07T18:19:17,024 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 6dd5e97ef7924eaa8021d248e9675381, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733595554920 2024-12-07T18:19:17,024 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13b1a89a3b4641a18bdcece741674a43, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733595555275 2024-12-07T18:19:17,026 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 774bc9febdfe45418cdd4f3571a70af8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733595555275 2024-12-07T18:19:17,038 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#A#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:17,039 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/494d79a18b9e4f378f0107cd6ac0532e is 50, key is test_row_0/A:col10/1733595556407/Put/seqid=0 2024-12-07T18:19:17,045 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#B#compaction#49 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:17,048 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/785b1d28f16e483fa52a229ccf9df31b is 50, key is test_row_0/B:col10/1733595556407/Put/seqid=0 2024-12-07T18:19:17,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741887_1063 (size=12697) 2024-12-07T18:19:17,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:19:17,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:17,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:17,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:17,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:17,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:17,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:17,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:17,074 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/494d79a18b9e4f378f0107cd6ac0532e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/494d79a18b9e4f378f0107cd6ac0532e 2024-12-07T18:19:17,074 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-07T18:19:17,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:17,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:17,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
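The pid=19 FlushRegionCallable above is dispatched by the master and rejected because the region is already flushing; the master then retries the remote procedure, which is why the same pid reappears later in the log. A hedged sketch of how such a flush can be requested from the client side follows; in this build the request appears to be driven through a master procedure (as the pid=18/19 "Checking to see if procedure is done" entries suggest), but the connection setup and error handling below are illustrative assumptions, not the test's code.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask for a flush of the table seen in the log. On the server side this
      // ends up as a FlushRegionCallable; if the region is already flushing
      // (as above), the callable fails and the procedure is retried.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```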
2024-12-07T18:19:17,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:17,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:17,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:17,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741888_1064 (size=12697) 2024-12-07T18:19:17,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595617076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595617082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595617080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595617082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595617084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/afc87b38451a44c491beb63b0e335883 is 50, key is test_row_0/A:col10/1733595557059/Put/seqid=0 2024-12-07T18:19:17,090 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/A of a05999984107cee49bb0b7292dd34cbb into 494d79a18b9e4f378f0107cd6ac0532e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
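The repeated RegionTooBusyException entries ("Over memstore limit=512.0 K") show the region rejecting writes until its memstore drains. The HBase client normally absorbs these with its own retry/backoff; the explicit loop, row/value contents, and backoff numbers below are illustrative assumptions for a writer that wants to handle the condition itself, not the test's actual writer code.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Region is blocking writes until the memstore drains below the limit.
          // Depending on client retry settings this may instead surface wrapped
          // in a retries-exhausted exception.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```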
2024-12-07T18:19:17,090 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:17,090 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/A, priority=13, startTime=1733595557019; duration=0sec 2024-12-07T18:19:17,090 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:17,090 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:17,090 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:17,092 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:17,092 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/C is initiating minor compaction (all files) 2024-12-07T18:19:17,092 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/C in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:17,092 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/3d454caad9c24b3cb15a173617f9c379, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/643b4938ca8643709aec264d67ccd5ae, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/00e28abc7fed4c0b99803c8ff2aee4e3] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=36.0 K 2024-12-07T18:19:17,093 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d454caad9c24b3cb15a173617f9c379, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733595554885 2024-12-07T18:19:17,093 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 643b4938ca8643709aec264d67ccd5ae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733595554920 2024-12-07T18:19:17,093 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00e28abc7fed4c0b99803c8ff2aee4e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733595555275 2024-12-07T18:19:17,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33311 is added to blk_1073741889_1065 (size=12251) 2024-12-07T18:19:17,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/afc87b38451a44c491beb63b0e335883 2024-12-07T18:19:17,123 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#C#compaction#51 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:17,124 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/306b6b5c55a04378887a57707c3e9e09 is 50, key is test_row_0/C:col10/1733595556407/Put/seqid=0 2024-12-07T18:19:17,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/e48b70258d4141b4b4e7a9fd061ec506 is 50, key is test_row_0/B:col10/1733595557059/Put/seqid=0 2024-12-07T18:19:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741891_1067 (size=12251) 2024-12-07T18:19:17,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741890_1066 (size=12697) 2024-12-07T18:19:17,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/e48b70258d4141b4b4e7a9fd061ec506 2024-12-07T18:19:17,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/e5c026805770484d963601dbaa15ebc2 is 50, key is test_row_0/C:col10/1733595557059/Put/seqid=0 2024-12-07T18:19:17,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741892_1068 (size=12251) 2024-12-07T18:19:17,189 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/e5c026805770484d963601dbaa15ebc2 2024-12-07T18:19:17,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595617188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595617189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595617189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595617189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595617190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/afc87b38451a44c491beb63b0e335883 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/afc87b38451a44c491beb63b0e335883 2024-12-07T18:19:17,214 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/afc87b38451a44c491beb63b0e335883, entries=150, sequenceid=258, filesize=12.0 K 2024-12-07T18:19:17,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/e48b70258d4141b4b4e7a9fd061ec506 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e48b70258d4141b4b4e7a9fd061ec506 2024-12-07T18:19:17,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e48b70258d4141b4b4e7a9fd061ec506, entries=150, sequenceid=258, filesize=12.0 K 2024-12-07T18:19:17,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/e5c026805770484d963601dbaa15ebc2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e5c026805770484d963601dbaa15ebc2 2024-12-07T18:19:17,230 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-07T18:19:17,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:17,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:17,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:17,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:17,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:17,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
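The blocking threshold behind "Over memstore limit=512.0 K" is the per-region flush size multiplied by the block multiplier (checkResources blocks puts once the memstore exceeds flush.size * block.multiplier). The 128 KB / 4x values in the sketch below are assumptions chosen only to reproduce the 512 K figure in this log; the values actually configured by TestAcidGuarantees are not shown in this excerpt.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // flush at 128 KB (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block puts at 4x = 512 KB
    long blockAt = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block once a region's memstore exceeds " + blockAt + " bytes");
  }
}
```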
2024-12-07T18:19:17,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e5c026805770484d963601dbaa15ebc2, entries=150, sequenceid=258, filesize=12.0 K 2024-12-07T18:19:17,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for a05999984107cee49bb0b7292dd34cbb in 175ms, sequenceid=258, compaction requested=false 2024-12-07T18:19:17,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:17,385 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-07T18:19:17,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:17,386 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-07T18:19:17,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:17,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:17,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:17,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:17,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:17,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:17,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/7d023e65fe594b149a80d14d03b12402 is 50, key is test_row_0/A:col10/1733595557075/Put/seqid=0 2024-12-07T18:19:17,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
as already flushing 2024-12-07T18:19:17,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:17,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741893_1069 (size=12301) 2024-12-07T18:19:17,422 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/7d023e65fe594b149a80d14d03b12402 2024-12-07T18:19:17,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595617423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595617429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595617431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595617433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595617433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/685fc747bd7643deb2cf123d267b663a is 50, key is test_row_0/B:col10/1733595557075/Put/seqid=0 2024-12-07T18:19:17,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741894_1070 (size=12301) 2024-12-07T18:19:17,488 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/785b1d28f16e483fa52a229ccf9df31b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/785b1d28f16e483fa52a229ccf9df31b 2024-12-07T18:19:17,496 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/B of a05999984107cee49bb0b7292dd34cbb into 785b1d28f16e483fa52a229ccf9df31b(size=12.4 K), total size for store is 24.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:17,496 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:17,496 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/B, priority=13, startTime=1733595557020; duration=0sec 2024-12-07T18:19:17,496 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:17,496 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:17,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595617534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595617536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595617538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595617539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595617539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,549 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/306b6b5c55a04378887a57707c3e9e09 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/306b6b5c55a04378887a57707c3e9e09 2024-12-07T18:19:17,556 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/C of a05999984107cee49bb0b7292dd34cbb into 306b6b5c55a04378887a57707c3e9e09(size=12.4 K), total size for store is 24.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:17,556 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:17,556 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/C, priority=13, startTime=1733595557020; duration=0sec 2024-12-07T18:19:17,556 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:17,556 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:17,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595617737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595617741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595617742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595617743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:17,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595617744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:17,852 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/685fc747bd7643deb2cf123d267b663a 2024-12-07T18:19:17,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fca7a027e63a412290bd2d12c8c86d1e is 50, key is test_row_0/C:col10/1733595557075/Put/seqid=0 2024-12-07T18:19:17,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741895_1071 (size=12301) 2024-12-07T18:19:17,910 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fca7a027e63a412290bd2d12c8c86d1e 2024-12-07T18:19:17,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/7d023e65fe594b149a80d14d03b12402 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7d023e65fe594b149a80d14d03b12402
2024-12-07T18:19:17,929 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7d023e65fe594b149a80d14d03b12402, entries=150, sequenceid=271, filesize=12.0 K
2024-12-07T18:19:17,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/685fc747bd7643deb2cf123d267b663a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/685fc747bd7643deb2cf123d267b663a
2024-12-07T18:19:17,945 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/685fc747bd7643deb2cf123d267b663a, entries=150, sequenceid=271, filesize=12.0 K
2024-12-07T18:19:17,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-12-07T18:19:17,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fca7a027e63a412290bd2d12c8c86d1e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fca7a027e63a412290bd2d12c8c86d1e
2024-12-07T18:19:17,960 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fca7a027e63a412290bd2d12c8c86d1e, entries=150, sequenceid=271, filesize=12.0 K
2024-12-07T18:19:17,961 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for a05999984107cee49bb0b7292dd34cbb in 575ms, sequenceid=271, compaction requested=true
2024-12-07T18:19:17,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb:
2024-12-07T18:19:17,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.
2024-12-07T18:19:17,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19
2024-12-07T18:19:17,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=19
2024-12-07T18:19:17,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18
2024-12-07T18:19:17,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1260 sec
2024-12-07T18:19:17,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.1360 sec
2024-12-07T18:19:18,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb
2024-12-07T18:19:18,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-12-07T18:19:18,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A
2024-12-07T18:19:18,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:19:18,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B
2024-12-07T18:19:18,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:19:18,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C
2024-12-07T18:19:18,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:19:18,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/23b92a40c4804e46a7de22e855bb129c is 50, key is test_row_0/A:col10/1733595558041/Put/seqid=0
2024-12-07T18:19:18,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595618058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595618059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595618060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595618062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595618063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741896_1072 (size=14741) 2024-12-07T18:19:18,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/23b92a40c4804e46a7de22e855bb129c 2024-12-07T18:19:18,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/9ea2ef2bc09844039a68048d8df39872 is 50, key is test_row_0/B:col10/1733595558041/Put/seqid=0 2024-12-07T18:19:18,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741897_1073 (size=12301) 2024-12-07T18:19:18,129 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/9ea2ef2bc09844039a68048d8df39872 2024-12-07T18:19:18,147 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/818fbb07f43640889078adc15bd36280 is 50, key is test_row_0/C:col10/1733595558041/Put/seqid=0 2024-12-07T18:19:18,158 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741898_1074 (size=12301) 2024-12-07T18:19:18,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/818fbb07f43640889078adc15bd36280 2024-12-07T18:19:18,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595618166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/23b92a40c4804e46a7de22e855bb129c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/23b92a40c4804e46a7de22e855bb129c 2024-12-07T18:19:18,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595618167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595618168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595618169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595618170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/23b92a40c4804e46a7de22e855bb129c, entries=200, sequenceid=299, filesize=14.4 K 2024-12-07T18:19:18,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/9ea2ef2bc09844039a68048d8df39872 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/9ea2ef2bc09844039a68048d8df39872 2024-12-07T18:19:18,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/9ea2ef2bc09844039a68048d8df39872, entries=150, sequenceid=299, filesize=12.0 K 2024-12-07T18:19:18,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/818fbb07f43640889078adc15bd36280 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/818fbb07f43640889078adc15bd36280 2024-12-07T18:19:18,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/818fbb07f43640889078adc15bd36280, entries=150, sequenceid=299, filesize=12.0 K 2024-12-07T18:19:18,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for a05999984107cee49bb0b7292dd34cbb in 159ms, sequenceid=299, compaction requested=true 2024-12-07T18:19:18,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:18,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:18,203 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:18,203 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:18,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:18,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:18,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:18,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-07T18:19:18,203 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:18,205 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51990 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:18,205 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/A is initiating minor compaction (all files) 2024-12-07T18:19:18,205 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/A in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:18,205 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/494d79a18b9e4f378f0107cd6ac0532e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/afc87b38451a44c491beb63b0e335883, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7d023e65fe594b149a80d14d03b12402, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/23b92a40c4804e46a7de22e855bb129c] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=50.8 K 2024-12-07T18:19:18,206 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 494d79a18b9e4f378f0107cd6ac0532e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733595555275 2024-12-07T18:19:18,206 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49550 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:18,206 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/B is initiating minor compaction (all files) 2024-12-07T18:19:18,206 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/B in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:18,206 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/785b1d28f16e483fa52a229ccf9df31b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e48b70258d4141b4b4e7a9fd061ec506, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/685fc747bd7643deb2cf123d267b663a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/9ea2ef2bc09844039a68048d8df39872] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=48.4 K 2024-12-07T18:19:18,206 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting afc87b38451a44c491beb63b0e335883, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733595557059 2024-12-07T18:19:18,207 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 785b1d28f16e483fa52a229ccf9df31b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733595555275 2024-12-07T18:19:18,207 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d023e65fe594b149a80d14d03b12402, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733595557070 2024-12-07T18:19:18,207 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting e48b70258d4141b4b4e7a9fd061ec506, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733595557059 2024-12-07T18:19:18,207 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 685fc747bd7643deb2cf123d267b663a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733595557070 2024-12-07T18:19:18,207 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 23b92a40c4804e46a7de22e855bb129c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733595557417 2024-12-07T18:19:18,208 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ea2ef2bc09844039a68048d8df39872, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733595557417 2024-12-07T18:19:18,237 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#A#compaction#60 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:18,237 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#B#compaction#61 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:18,238 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/b7625019760747079d1917e1364c2af5 is 50, key is test_row_0/A:col10/1733595558041/Put/seqid=0 2024-12-07T18:19:18,238 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/a157cb631cd04c3796761bebff1b4a6f is 50, key is test_row_0/B:col10/1733595558041/Put/seqid=0 2024-12-07T18:19:18,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741900_1076 (size=12983) 2024-12-07T18:19:18,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741899_1075 (size=12983) 2024-12-07T18:19:18,307 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/b7625019760747079d1917e1364c2af5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b7625019760747079d1917e1364c2af5 2024-12-07T18:19:18,317 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/A of a05999984107cee49bb0b7292dd34cbb into b7625019760747079d1917e1364c2af5(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:18,317 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:18,317 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/A, priority=12, startTime=1733595558203; duration=0sec 2024-12-07T18:19:18,317 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:18,318 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:18,318 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:18,320 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49550 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:18,320 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/C is initiating minor compaction (all files) 2024-12-07T18:19:18,320 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/C in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:18,320 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/306b6b5c55a04378887a57707c3e9e09, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e5c026805770484d963601dbaa15ebc2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fca7a027e63a412290bd2d12c8c86d1e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/818fbb07f43640889078adc15bd36280] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=48.4 K 2024-12-07T18:19:18,321 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 306b6b5c55a04378887a57707c3e9e09, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733595555275 2024-12-07T18:19:18,322 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting e5c026805770484d963601dbaa15ebc2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733595557059 2024-12-07T18:19:18,322 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting fca7a027e63a412290bd2d12c8c86d1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=271, earliestPutTs=1733595557070 2024-12-07T18:19:18,323 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 818fbb07f43640889078adc15bd36280, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733595557417 2024-12-07T18:19:18,335 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#C#compaction#62 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:18,336 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/c7c0d51da1264074aa540b299917fdf6 is 50, key is test_row_0/C:col10/1733595558041/Put/seqid=0 2024-12-07T18:19:18,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741901_1077 (size=12983) 2024-12-07T18:19:18,354 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/c7c0d51da1264074aa540b299917fdf6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/c7c0d51da1264074aa540b299917fdf6 2024-12-07T18:19:18,369 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/C of a05999984107cee49bb0b7292dd34cbb into c7c0d51da1264074aa540b299917fdf6(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:18,369 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:18,369 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/C, priority=12, startTime=1733595558203; duration=0sec 2024-12-07T18:19:18,370 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:18,370 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:18,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:18,376 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:19:18,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:18,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:18,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:18,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:18,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:18,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:18,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/7b8b8eedcee04e98b321c96226deb03d is 50, key is test_row_0/A:col10/1733595558058/Put/seqid=0 2024-12-07T18:19:18,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741902_1078 (size=12301) 2024-12-07T18:19:18,411 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/7b8b8eedcee04e98b321c96226deb03d 2024-12-07T18:19:18,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595618408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595618410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595618412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595618414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595618412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/05a824210c4447b8879d73a3a7750996 is 50, key is test_row_0/B:col10/1733595558058/Put/seqid=0 2024-12-07T18:19:18,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741903_1079 (size=12301) 2024-12-07T18:19:18,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/05a824210c4447b8879d73a3a7750996 2024-12-07T18:19:18,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/ed8e2feb7d9447f8b89e0c58ea1c76d8 is 50, key is test_row_0/C:col10/1733595558058/Put/seqid=0 2024-12-07T18:19:18,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741904_1080 (size=12301) 2024-12-07T18:19:18,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595618515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595618515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595618518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595618520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595618521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,694 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/a157cb631cd04c3796761bebff1b4a6f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/a157cb631cd04c3796761bebff1b4a6f 2024-12-07T18:19:18,704 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/B of a05999984107cee49bb0b7292dd34cbb into a157cb631cd04c3796761bebff1b4a6f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:18,704 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:18,704 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/B, priority=12, startTime=1733595558203; duration=0sec 2024-12-07T18:19:18,705 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:18,705 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:18,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595618720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595618723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595618726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595618727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:18,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595618727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:18,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/ed8e2feb7d9447f8b89e0c58ea1c76d8 2024-12-07T18:19:18,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/7b8b8eedcee04e98b321c96226deb03d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7b8b8eedcee04e98b321c96226deb03d 2024-12-07T18:19:18,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7b8b8eedcee04e98b321c96226deb03d, entries=150, sequenceid=312, filesize=12.0 K 2024-12-07T18:19:18,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/05a824210c4447b8879d73a3a7750996 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/05a824210c4447b8879d73a3a7750996 2024-12-07T18:19:18,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/05a824210c4447b8879d73a3a7750996, entries=150, sequenceid=312, filesize=12.0 K 2024-12-07T18:19:18,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/ed8e2feb7d9447f8b89e0c58ea1c76d8 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ed8e2feb7d9447f8b89e0c58ea1c76d8 2024-12-07T18:19:18,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ed8e2feb7d9447f8b89e0c58ea1c76d8, entries=150, sequenceid=312, filesize=12.0 K 2024-12-07T18:19:18,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for a05999984107cee49bb0b7292dd34cbb in 553ms, sequenceid=312, compaction requested=false 2024-12-07T18:19:18,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:19,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:19,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-07T18:19:19,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:19,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:19,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:19,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:19,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:19,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:19,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/7dc1ee430bde476fb02ee7cfe0514097 is 50, key is test_row_0/A:col10/1733595558410/Put/seqid=0 2024-12-07T18:19:19,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595619032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595619034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595619036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595619039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595619039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741905_1081 (size=14741) 2024-12-07T18:19:19,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/7dc1ee430bde476fb02ee7cfe0514097 2024-12-07T18:19:19,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/d5390288eff24da5b2dd2b08665582a1 is 50, key is test_row_0/B:col10/1733595558410/Put/seqid=0 2024-12-07T18:19:19,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741906_1082 (size=12301) 2024-12-07T18:19:19,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595619141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595619142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595619143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595619144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595619144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595619345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595619347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595619348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595619351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595619352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,519 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/d5390288eff24da5b2dd2b08665582a1 2024-12-07T18:19:19,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/43c2800e213e4bd09045e423e0f0bf66 is 50, key is test_row_0/C:col10/1733595558410/Put/seqid=0 2024-12-07T18:19:19,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741907_1083 (size=12301) 2024-12-07T18:19:19,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595619651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595619651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595619656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595619657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:19,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595619658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:19,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/43c2800e213e4bd09045e423e0f0bf66 2024-12-07T18:19:19,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/7dc1ee430bde476fb02ee7cfe0514097 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7dc1ee430bde476fb02ee7cfe0514097 2024-12-07T18:19:19,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-07T18:19:19,949 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-07T18:19:19,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:19,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-07T18:19:19,953 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7dc1ee430bde476fb02ee7cfe0514097, entries=200, sequenceid=340, filesize=14.4 K 2024-12-07T18:19:19,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T18:19:19,955 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:19,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/d5390288eff24da5b2dd2b08665582a1 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/d5390288eff24da5b2dd2b08665582a1 2024-12-07T18:19:19,956 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:19,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:19,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/d5390288eff24da5b2dd2b08665582a1, entries=150, sequenceid=340, filesize=12.0 K 2024-12-07T18:19:19,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/43c2800e213e4bd09045e423e0f0bf66 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/43c2800e213e4bd09045e423e0f0bf66 2024-12-07T18:19:19,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/43c2800e213e4bd09045e423e0f0bf66, entries=150, sequenceid=340, filesize=12.0 K 2024-12-07T18:19:19,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for a05999984107cee49bb0b7292dd34cbb in 950ms, sequenceid=340, compaction requested=true 2024-12-07T18:19:19,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:19,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:19,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:19,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:19,975 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:19,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:19,975 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:19,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:19,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:19,977 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:19,977 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/B is initiating minor compaction (all files) 2024-12-07T18:19:19,978 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/B in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:19,978 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:19,978 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/A is initiating minor compaction (all files) 2024-12-07T18:19:19,978 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/A in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:19,978 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b7625019760747079d1917e1364c2af5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7b8b8eedcee04e98b321c96226deb03d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7dc1ee430bde476fb02ee7cfe0514097] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=39.1 K 2024-12-07T18:19:19,979 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/a157cb631cd04c3796761bebff1b4a6f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/05a824210c4447b8879d73a3a7750996, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/d5390288eff24da5b2dd2b08665582a1] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=36.7 K 2024-12-07T18:19:19,979 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7625019760747079d1917e1364c2af5, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733595557417 2024-12-07T18:19:19,979 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a157cb631cd04c3796761bebff1b4a6f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733595557417 2024-12-07T18:19:19,980 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b8b8eedcee04e98b321c96226deb03d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733595558052 2024-12-07T18:19:19,980 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 05a824210c4447b8879d73a3a7750996, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733595558052 2024-12-07T18:19:19,980 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7dc1ee430bde476fb02ee7cfe0514097, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1733595558410 2024-12-07T18:19:19,981 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d5390288eff24da5b2dd2b08665582a1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1733595558410 2024-12-07T18:19:19,997 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#A#compaction#69 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:19,998 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/00e7d1bc0bd24afbb9f7c5c728d29459 is 50, key is test_row_0/A:col10/1733595558410/Put/seqid=0 2024-12-07T18:19:20,002 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#B#compaction#70 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:20,003 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/8258ac5b1a694657bf2415b4be18d662 is 50, key is test_row_0/B:col10/1733595558410/Put/seqid=0 2024-12-07T18:19:20,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741908_1084 (size=13085) 2024-12-07T18:19:20,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741909_1085 (size=13085) 2024-12-07T18:19:20,023 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/00e7d1bc0bd24afbb9f7c5c728d29459 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/00e7d1bc0bd24afbb9f7c5c728d29459 2024-12-07T18:19:20,031 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/A of a05999984107cee49bb0b7292dd34cbb into 00e7d1bc0bd24afbb9f7c5c728d29459(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:20,031 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:20,031 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/A, priority=13, startTime=1733595559975; duration=0sec 2024-12-07T18:19:20,031 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:20,031 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:20,031 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:20,033 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:20,033 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/C is initiating minor compaction (all files) 2024-12-07T18:19:20,033 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/C in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:20,033 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/c7c0d51da1264074aa540b299917fdf6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ed8e2feb7d9447f8b89e0c58ea1c76d8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/43c2800e213e4bd09045e423e0f0bf66] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=36.7 K 2024-12-07T18:19:20,034 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7c0d51da1264074aa540b299917fdf6, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733595557417 2024-12-07T18:19:20,034 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed8e2feb7d9447f8b89e0c58ea1c76d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733595558052 2024-12-07T18:19:20,035 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43c2800e213e4bd09045e423e0f0bf66, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1733595558410 2024-12-07T18:19:20,046 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#C#compaction#71 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:20,048 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fdbebd756eb7487d93ddf1d187e0c7d5 is 50, key is test_row_0/C:col10/1733595558410/Put/seqid=0 2024-12-07T18:19:20,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741910_1086 (size=13085) 2024-12-07T18:19:20,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T18:19:20,108 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-07T18:19:20,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:20,109 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-07T18:19:20,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:20,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:20,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:20,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:20,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:20,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:20,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/14a6f5c812374b61b081dbfab3bffe7e is 50, key is test_row_0/A:col10/1733595559032/Put/seqid=0 
2024-12-07T18:19:20,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741911_1087 (size=12301) 2024-12-07T18:19:20,122 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/14a6f5c812374b61b081dbfab3bffe7e 2024-12-07T18:19:20,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/1cc712f0314f45159a16d0b706995f1d is 50, key is test_row_0/B:col10/1733595559032/Put/seqid=0 2024-12-07T18:19:20,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741912_1088 (size=12301) 2024-12-07T18:19:20,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:20,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:20,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595620184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595620184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595620186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595620187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595620189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T18:19:20,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595620292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595620293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595620293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595620294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595620294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,425 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/8258ac5b1a694657bf2415b4be18d662 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/8258ac5b1a694657bf2415b4be18d662 2024-12-07T18:19:20,438 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/B of a05999984107cee49bb0b7292dd34cbb into 8258ac5b1a694657bf2415b4be18d662(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:20,438 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:20,438 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/B, priority=13, startTime=1733595559975; duration=0sec 2024-12-07T18:19:20,439 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:20,439 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:20,463 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fdbebd756eb7487d93ddf1d187e0c7d5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fdbebd756eb7487d93ddf1d187e0c7d5 2024-12-07T18:19:20,472 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/C of a05999984107cee49bb0b7292dd34cbb into fdbebd756eb7487d93ddf1d187e0c7d5(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:20,472 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:20,473 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/C, priority=13, startTime=1733595559975; duration=0sec 2024-12-07T18:19:20,473 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:20,473 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:20,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595620496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595620497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595620498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595620500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595620501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,539 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/1cc712f0314f45159a16d0b706995f1d 2024-12-07T18:19:20,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/85c2f87d63964930833a3eae202e48b5 is 50, key is test_row_0/C:col10/1733595559032/Put/seqid=0 2024-12-07T18:19:20,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T18:19:20,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741913_1089 (size=12301) 2024-12-07T18:19:20,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595620800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595620803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595620804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595620805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:20,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595620806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:20,990 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/85c2f87d63964930833a3eae202e48b5 2024-12-07T18:19:21,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/14a6f5c812374b61b081dbfab3bffe7e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/14a6f5c812374b61b081dbfab3bffe7e 2024-12-07T18:19:21,009 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/14a6f5c812374b61b081dbfab3bffe7e, entries=150, sequenceid=350, filesize=12.0 K 2024-12-07T18:19:21,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/1cc712f0314f45159a16d0b706995f1d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/1cc712f0314f45159a16d0b706995f1d 2024-12-07T18:19:21,026 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/1cc712f0314f45159a16d0b706995f1d, entries=150, sequenceid=350, filesize=12.0 K 2024-12-07T18:19:21,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/85c2f87d63964930833a3eae202e48b5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/85c2f87d63964930833a3eae202e48b5 2024-12-07T18:19:21,037 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/85c2f87d63964930833a3eae202e48b5, entries=150, sequenceid=350, filesize=12.0 K 2024-12-07T18:19:21,040 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for a05999984107cee49bb0b7292dd34cbb in 931ms, sequenceid=350, compaction requested=false 2024-12-07T18:19:21,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:21,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:21,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-07T18:19:21,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-07T18:19:21,043 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-07T18:19:21,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0860 sec 2024-12-07T18:19:21,046 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.0930 sec 2024-12-07T18:19:21,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T18:19:21,063 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-07T18:19:21,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:21,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-07T18:19:21,068 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:21,068 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-07T18:19:21,068 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:21,069 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:21,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-07T18:19:21,221 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-07T18:19:21,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:21,222 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-07T18:19:21,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:21,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:21,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:21,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:21,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:21,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:21,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1186513eed5d4919ba0c437d40909032 is 50, key is test_row_0/A:col10/1733595560188/Put/seqid=0 2024-12-07T18:19:21,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741914_1090 (size=12301) 
2024-12-07T18:19:21,252 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1186513eed5d4919ba0c437d40909032 2024-12-07T18:19:21,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/72fe2ae22b484cf2a4c09427c3fd3b7c is 50, key is test_row_0/B:col10/1733595560188/Put/seqid=0 2024-12-07T18:19:21,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741915_1091 (size=12301) 2024-12-07T18:19:21,271 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/72fe2ae22b484cf2a4c09427c3fd3b7c 2024-12-07T18:19:21,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/e99a873e949e43d1aa3d039dbf1325e7 is 50, key is test_row_0/C:col10/1733595560188/Put/seqid=0 2024-12-07T18:19:21,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741916_1092 (size=12301) 2024-12-07T18:19:21,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:21,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:21,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595621311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595621312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595621312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595621313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595621314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-07T18:19:21,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595621416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595621417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595621417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595621418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595621621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595621621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595621621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595621621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-07T18:19:21,688 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/e99a873e949e43d1aa3d039dbf1325e7 2024-12-07T18:19:21,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1186513eed5d4919ba0c437d40909032 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1186513eed5d4919ba0c437d40909032 2024-12-07T18:19:21,707 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1186513eed5d4919ba0c437d40909032, entries=150, sequenceid=379, filesize=12.0 K 2024-12-07T18:19:21,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/72fe2ae22b484cf2a4c09427c3fd3b7c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/72fe2ae22b484cf2a4c09427c3fd3b7c 2024-12-07T18:19:21,715 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/72fe2ae22b484cf2a4c09427c3fd3b7c, entries=150, sequenceid=379, filesize=12.0 K 2024-12-07T18:19:21,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/e99a873e949e43d1aa3d039dbf1325e7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e99a873e949e43d1aa3d039dbf1325e7 2024-12-07T18:19:21,722 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e99a873e949e43d1aa3d039dbf1325e7, entries=150, sequenceid=379, filesize=12.0 K 2024-12-07T18:19:21,723 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for a05999984107cee49bb0b7292dd34cbb in 502ms, sequenceid=379, compaction requested=true 2024-12-07T18:19:21,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:21,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:21,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-07T18:19:21,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-07T18:19:21,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-07T18:19:21,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 656 msec 2024-12-07T18:19:21,730 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 663 msec 2024-12-07T18:19:21,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:21,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:19:21,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:21,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:21,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:21,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:21,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): 
FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:21,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:21,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/64f4ad1acfbb4317a5213a45c5936139 is 50, key is test_row_0/A:col10/1733595561928/Put/seqid=0 2024-12-07T18:19:21,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741917_1093 (size=12301) 2024-12-07T18:19:21,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595621969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595621970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595621970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:21,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:21,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595621972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595622076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595622076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595622078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595622078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-07T18:19:22,172 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-07T18:19:22,173 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:22,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-07T18:19:22,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-07T18:19:22,176 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:22,177 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:22,177 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:22,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-07T18:19:22,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595622279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595622280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595622280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595622281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595622317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,329 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,330 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-07T18:19:22,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:22,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:22,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:22,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:22,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:22,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:22,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/64f4ad1acfbb4317a5213a45c5936139 2024-12-07T18:19:22,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/77f9be276ccd42fc973971cba8818c87 is 50, key is test_row_0/B:col10/1733595561928/Put/seqid=0 2024-12-07T18:19:22,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741918_1094 (size=12301) 2024-12-07T18:19:22,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/77f9be276ccd42fc973971cba8818c87 2024-12-07T18:19:22,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fe114c60dd374f82bab6a31d3b5f58ca is 50, key is test_row_0/C:col10/1733595561928/Put/seqid=0 2024-12-07T18:19:22,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741919_1095 (size=12301) 2024-12-07T18:19:22,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=391 (bloomFilter=true), 
to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fe114c60dd374f82bab6a31d3b5f58ca 2024-12-07T18:19:22,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/64f4ad1acfbb4317a5213a45c5936139 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/64f4ad1acfbb4317a5213a45c5936139 2024-12-07T18:19:22,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-07T18:19:22,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/64f4ad1acfbb4317a5213a45c5936139, entries=150, sequenceid=391, filesize=12.0 K 2024-12-07T18:19:22,484 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-07T18:19:22,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:22,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:22,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:22,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:22,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:22,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/77f9be276ccd42fc973971cba8818c87 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77f9be276ccd42fc973971cba8818c87 2024-12-07T18:19:22,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:22,500 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77f9be276ccd42fc973971cba8818c87, entries=150, sequenceid=391, filesize=12.0 K 2024-12-07T18:19:22,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fe114c60dd374f82bab6a31d3b5f58ca as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fe114c60dd374f82bab6a31d3b5f58ca 2024-12-07T18:19:22,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fe114c60dd374f82bab6a31d3b5f58ca, entries=150, sequenceid=391, filesize=12.0 K 2024-12-07T18:19:22,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for a05999984107cee49bb0b7292dd34cbb in 590ms, sequenceid=391, compaction requested=true 2024-12-07T18:19:22,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:22,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:22,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:22,520 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:22,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:22,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:22,520 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:22,521 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:22,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:22,522 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:22,522 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:22,522 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/A is initiating minor compaction (all files) 2024-12-07T18:19:22,522 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/B is initiating minor compaction (all files) 2024-12-07T18:19:22,522 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/A in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:22,522 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/B in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:22,522 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/00e7d1bc0bd24afbb9f7c5c728d29459, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/14a6f5c812374b61b081dbfab3bffe7e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1186513eed5d4919ba0c437d40909032, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/64f4ad1acfbb4317a5213a45c5936139] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=48.8 K 2024-12-07T18:19:22,522 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/8258ac5b1a694657bf2415b4be18d662, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/1cc712f0314f45159a16d0b706995f1d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/72fe2ae22b484cf2a4c09427c3fd3b7c, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77f9be276ccd42fc973971cba8818c87] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=48.8 K 2024-12-07T18:19:22,523 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00e7d1bc0bd24afbb9f7c5c728d29459, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1733595558410 2024-12-07T18:19:22,523 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 8258ac5b1a694657bf2415b4be18d662, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1733595558410 2024-12-07T18:19:22,524 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14a6f5c812374b61b081dbfab3bffe7e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1733595559032 2024-12-07T18:19:22,524 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cc712f0314f45159a16d0b706995f1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1733595559032 2024-12-07T18:19:22,524 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1186513eed5d4919ba0c437d40909032, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733595560182 2024-12-07T18:19:22,525 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 72fe2ae22b484cf2a4c09427c3fd3b7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733595560182 2024-12-07T18:19:22,525 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64f4ad1acfbb4317a5213a45c5936139, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733595561310 2024-12-07T18:19:22,525 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 77f9be276ccd42fc973971cba8818c87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733595561310 2024-12-07T18:19:22,544 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#B#compaction#81 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:22,544 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/3e1ea8563d35408da051d6962a37e1d6 is 50, key is test_row_0/B:col10/1733595561928/Put/seqid=0 2024-12-07T18:19:22,554 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#A#compaction#82 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:22,555 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/43844bf1309c44e294b8ccf15025bbdb is 50, key is test_row_0/A:col10/1733595561928/Put/seqid=0 2024-12-07T18:19:22,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741920_1096 (size=13221) 2024-12-07T18:19:22,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741921_1097 (size=13221) 2024-12-07T18:19:22,581 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/3e1ea8563d35408da051d6962a37e1d6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/3e1ea8563d35408da051d6962a37e1d6 2024-12-07T18:19:22,586 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/43844bf1309c44e294b8ccf15025bbdb as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/43844bf1309c44e294b8ccf15025bbdb 2024-12-07T18:19:22,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:22,596 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-07T18:19:22,597 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/A of a05999984107cee49bb0b7292dd34cbb into 43844bf1309c44e294b8ccf15025bbdb(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:22,597 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:22,597 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/A, priority=12, startTime=1733595562520; duration=0sec 2024-12-07T18:19:22,597 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/B of a05999984107cee49bb0b7292dd34cbb into 3e1ea8563d35408da051d6962a37e1d6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:22,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:22,597 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:22,597 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:22,597 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:22,597 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/B, priority=12, startTime=1733595562520; duration=0sec 2024-12-07T18:19:22,597 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:22,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:22,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:22,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:22,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:22,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:22,599 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:22,600 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:22,600 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:22,600 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/C is initiating minor compaction (all files) 2024-12-07T18:19:22,600 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/C in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:22,601 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fdbebd756eb7487d93ddf1d187e0c7d5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/85c2f87d63964930833a3eae202e48b5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e99a873e949e43d1aa3d039dbf1325e7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fe114c60dd374f82bab6a31d3b5f58ca] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=48.8 K 2024-12-07T18:19:22,601 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdbebd756eb7487d93ddf1d187e0c7d5, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1733595558410 2024-12-07T18:19:22,602 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85c2f87d63964930833a3eae202e48b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1733595559032 2024-12-07T18:19:22,602 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting e99a873e949e43d1aa3d039dbf1325e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733595560182 2024-12-07T18:19:22,603 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe114c60dd374f82bab6a31d3b5f58ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733595561310 2024-12-07T18:19:22,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/245d32a5bf8c4619ba2f6f7f481b2e81 is 50, key is test_row_0/A:col10/1733595561967/Put/seqid=0 2024-12-07T18:19:22,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741922_1098 (size=12301) 2024-12-07T18:19:22,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/245d32a5bf8c4619ba2f6f7f481b2e81 2024-12-07T18:19:22,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595622612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595622612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595622619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595622621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,630 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#C#compaction#84 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:22,631 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/45f6b860f13e402e97da09f42db710a6 is 50, key is test_row_0/C:col10/1733595561928/Put/seqid=0 2024-12-07T18:19:22,637 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-07T18:19:22,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:22,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/fefd404f7f104778b9baeb4de51be029 is 50, key is test_row_0/B:col10/1733595561967/Put/seqid=0 2024-12-07T18:19:22,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:22,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:22,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:22,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:22,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:22,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741923_1099 (size=13221) 2024-12-07T18:19:22,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741924_1100 (size=12301) 2024-12-07T18:19:22,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/fefd404f7f104778b9baeb4de51be029 2024-12-07T18:19:22,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fdcb1a01a7014fa5bc9f9496fd2cc454 is 50, key is test_row_0/C:col10/1733595561967/Put/seqid=0 2024-12-07T18:19:22,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595622721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595622721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595622723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595622725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741925_1101 (size=12301) 2024-12-07T18:19:22,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fdcb1a01a7014fa5bc9f9496fd2cc454 2024-12-07T18:19:22,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/245d32a5bf8c4619ba2f6f7f481b2e81 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/245d32a5bf8c4619ba2f6f7f481b2e81 2024-12-07T18:19:22,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-07T18:19:22,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/245d32a5bf8c4619ba2f6f7f481b2e81, entries=150, sequenceid=421, filesize=12.0 K 2024-12-07T18:19:22,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/fefd404f7f104778b9baeb4de51be029 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fefd404f7f104778b9baeb4de51be029 2024-12-07T18:19:22,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fefd404f7f104778b9baeb4de51be029, entries=150, sequenceid=421, filesize=12.0 K 2024-12-07T18:19:22,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/fdcb1a01a7014fa5bc9f9496fd2cc454 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fdcb1a01a7014fa5bc9f9496fd2cc454 2024-12-07T18:19:22,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fdcb1a01a7014fa5bc9f9496fd2cc454, entries=150, sequenceid=421, filesize=12.0 K 2024-12-07T18:19:22,798 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-07T18:19:22,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for a05999984107cee49bb0b7292dd34cbb in 203ms, sequenceid=421, compaction requested=false 2024-12-07T18:19:22,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:22,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:22,800 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-07T18:19:22,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:22,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:22,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:22,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:22,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:22,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:22,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/67b41206a3d143ecb73649825f4c569f is 50, key is test_row_0/A:col10/1733595562607/Put/seqid=0 2024-12-07T18:19:22,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741926_1102 (size=9857) 2024-12-07T18:19:22,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:22,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:22,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595622965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595622968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595622969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:22,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:22,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595622971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595623072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595623072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595623074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595623075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,081 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/45f6b860f13e402e97da09f42db710a6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/45f6b860f13e402e97da09f42db710a6 2024-12-07T18:19:23,089 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/C of a05999984107cee49bb0b7292dd34cbb into 45f6b860f13e402e97da09f42db710a6(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:23,089 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:23,089 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/C, priority=12, startTime=1733595562520; duration=0sec 2024-12-07T18:19:23,089 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:23,089 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:23,216 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/67b41206a3d143ecb73649825f4c569f 2024-12-07T18:19:23,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/80fcd7fc70854452bf62799cc92d1b9c is 50, key is test_row_0/B:col10/1733595562607/Put/seqid=0 2024-12-07T18:19:23,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741927_1103 (size=9857) 2024-12-07T18:19:23,247 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/80fcd7fc70854452bf62799cc92d1b9c 2024-12-07T18:19:23,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/6c3c234a69e94bbb872e054a4e67b0d9 is 50, key is test_row_0/C:col10/1733595562607/Put/seqid=0 2024-12-07T18:19:23,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741928_1104 (size=9857) 2024-12-07T18:19:23,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595623275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595623276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-07T18:19:23,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595623277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595623279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595623579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595623579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595623583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:23,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595623583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:23,674 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/6c3c234a69e94bbb872e054a4e67b0d9 2024-12-07T18:19:23,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/67b41206a3d143ecb73649825f4c569f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/67b41206a3d143ecb73649825f4c569f 2024-12-07T18:19:23,691 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/67b41206a3d143ecb73649825f4c569f, entries=100, sequenceid=430, filesize=9.6 K 2024-12-07T18:19:23,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/80fcd7fc70854452bf62799cc92d1b9c as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/80fcd7fc70854452bf62799cc92d1b9c 2024-12-07T18:19:23,701 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/80fcd7fc70854452bf62799cc92d1b9c, entries=100, sequenceid=430, filesize=9.6 K 2024-12-07T18:19:23,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/6c3c234a69e94bbb872e054a4e67b0d9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6c3c234a69e94bbb872e054a4e67b0d9 2024-12-07T18:19:23,717 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6c3c234a69e94bbb872e054a4e67b0d9, entries=100, sequenceid=430, filesize=9.6 K 2024-12-07T18:19:23,718 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for a05999984107cee49bb0b7292dd34cbb in 919ms, sequenceid=430, compaction requested=true 2024-12-07T18:19:23,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:23,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:23,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-07T18:19:23,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-07T18:19:23,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-07T18:19:23,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5420 sec 2024-12-07T18:19:23,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.5490 sec 2024-12-07T18:19:24,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:24,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-07T18:19:24,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:24,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:24,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:24,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:24,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:24,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:24,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/2beeec1fd7914716a728d6cb0a87dfac is 50, key is test_row_0/A:col10/1733595562967/Put/seqid=0 2024-12-07T18:19:24,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595624089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595624096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595624096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595624093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741929_1105 (size=14741) 2024-12-07T18:19:24,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595624193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595624199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-07T18:19:24,281 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-07T18:19:24,282 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:24,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-07T18:19:24,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-07T18:19:24,284 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:24,284 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:24,284 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:24,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595624322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,326 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., hostname=8a7a030b35db,45237,1733595542335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:24,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-07T18:19:24,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595624396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595624400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,436 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-07T18:19:24,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:24,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:24,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:24,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:24,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:24,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:24,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/2beeec1fd7914716a728d6cb0a87dfac 2024-12-07T18:19:24,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/2281cefccfce4807ac7e2c4427c26635 is 50, key is test_row_0/B:col10/1733595562967/Put/seqid=0 2024-12-07T18:19:24,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741930_1106 (size=12301) 2024-12-07T18:19:24,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/2281cefccfce4807ac7e2c4427c26635 2024-12-07T18:19:24,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-07T18:19:24,590 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-07T18:19:24,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:24,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:24,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:24,592 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:24,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:24,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/7f84b51b6c884b93b83acbb444bf7796 is 50, key is test_row_0/C:col10/1733595562967/Put/seqid=0 2024-12-07T18:19:24,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741931_1107 (size=12301) 2024-12-07T18:19:24,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/7f84b51b6c884b93b83acbb444bf7796 2024-12-07T18:19:24,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/2beeec1fd7914716a728d6cb0a87dfac as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/2beeec1fd7914716a728d6cb0a87dfac 2024-12-07T18:19:24,642 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/2beeec1fd7914716a728d6cb0a87dfac, entries=200, sequenceid=460, filesize=14.4 K 2024-12-07T18:19:24,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/2281cefccfce4807ac7e2c4427c26635 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2281cefccfce4807ac7e2c4427c26635 2024-12-07T18:19:24,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2281cefccfce4807ac7e2c4427c26635, entries=150, sequenceid=460, filesize=12.0 K 2024-12-07T18:19:24,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/7f84b51b6c884b93b83acbb444bf7796 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7f84b51b6c884b93b83acbb444bf7796 2024-12-07T18:19:24,661 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7f84b51b6c884b93b83acbb444bf7796, entries=150, sequenceid=460, filesize=12.0 K 2024-12-07T18:19:24,662 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for a05999984107cee49bb0b7292dd34cbb in 579ms, sequenceid=460, compaction requested=true 2024-12-07T18:19:24,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:24,662 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:24,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:24,663 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50120 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:24,664 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/A is initiating minor compaction (all files) 2024-12-07T18:19:24,664 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/A in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:24,664 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/43844bf1309c44e294b8ccf15025bbdb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/245d32a5bf8c4619ba2f6f7f481b2e81, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/67b41206a3d143ecb73649825f4c569f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/2beeec1fd7914716a728d6cb0a87dfac] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=48.9 K 2024-12-07T18:19:24,664 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43844bf1309c44e294b8ccf15025bbdb, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733595561310 2024-12-07T18:19:24,672 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 245d32a5bf8c4619ba2f6f7f481b2e81, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1733595561967 2024-12-07T18:19:24,673 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67b41206a3d143ecb73649825f4c569f, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733595562607 2024-12-07T18:19:24,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:24,673 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2beeec1fd7914716a728d6cb0a87dfac, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733595562962 2024-12-07T18:19:24,673 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:24,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:24,676 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47680 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:24,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:24,677 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/B is initiating minor compaction (all files) 2024-12-07T18:19:24,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, 
priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:24,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:24,677 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/B in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:24,677 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/3e1ea8563d35408da051d6962a37e1d6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fefd404f7f104778b9baeb4de51be029, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/80fcd7fc70854452bf62799cc92d1b9c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2281cefccfce4807ac7e2c4427c26635] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=46.6 K 2024-12-07T18:19:24,677 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e1ea8563d35408da051d6962a37e1d6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733595561310 2024-12-07T18:19:24,680 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting fefd404f7f104778b9baeb4de51be029, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1733595561967 2024-12-07T18:19:24,680 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 80fcd7fc70854452bf62799cc92d1b9c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733595562607 2024-12-07T18:19:24,681 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2281cefccfce4807ac7e2c4427c26635, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733595562967 2024-12-07T18:19:24,689 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#A#compaction#93 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:24,690 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/f64168041d554f47962c8cefda2a844b is 50, key is test_row_0/A:col10/1733595562967/Put/seqid=0 2024-12-07T18:19:24,702 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#B#compaction#94 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:24,703 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/2cbc9330936541c58e2437f1bc53e361 is 50, key is test_row_0/B:col10/1733595562967/Put/seqid=0 2024-12-07T18:19:24,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:24,710 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:19:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:24,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741932_1108 (size=13357) 2024-12-07T18:19:24,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/e9f33e75d916412cbc3849f72b4ad81e is 50, key is test_row_0/A:col10/1733595564707/Put/seqid=0 2024-12-07T18:19:24,732 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/f64168041d554f47962c8cefda2a844b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/f64168041d554f47962c8cefda2a844b 2024-12-07T18:19:24,740 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 
a05999984107cee49bb0b7292dd34cbb/A of a05999984107cee49bb0b7292dd34cbb into f64168041d554f47962c8cefda2a844b(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:24,740 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:24,740 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/A, priority=12, startTime=1733595564662; duration=0sec 2024-12-07T18:19:24,740 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:24,740 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:24,740 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:24,743 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47680 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:24,743 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/C is initiating minor compaction (all files) 2024-12-07T18:19:24,743 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/C in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:24,743 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/45f6b860f13e402e97da09f42db710a6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fdcb1a01a7014fa5bc9f9496fd2cc454, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6c3c234a69e94bbb872e054a4e67b0d9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7f84b51b6c884b93b83acbb444bf7796] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=46.6 K 2024-12-07T18:19:24,744 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45f6b860f13e402e97da09f42db710a6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733595561310 2024-12-07T18:19:24,744 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,745 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdcb1a01a7014fa5bc9f9496fd2cc454, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1733595561967 2024-12-07T18:19:24,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-07T18:19:24,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:24,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:24,745 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c3c234a69e94bbb872e054a4e67b0d9, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733595562607 2024-12-07T18:19:24,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:24,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:24,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:24,746 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f84b51b6c884b93b83acbb444bf7796, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733595562967 2024-12-07T18:19:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:24,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741934_1110 (size=17181) 2024-12-07T18:19:24,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/e9f33e75d916412cbc3849f72b4ad81e 2024-12-07T18:19:24,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741933_1109 (size=13357) 2024-12-07T18:19:24,774 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/2cbc9330936541c58e2437f1bc53e361 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2cbc9330936541c58e2437f1bc53e361 2024-12-07T18:19:24,777 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#C#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:24,778 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/70ab781ead45467fa2171104ce3903f8 is 50, key is test_row_0/C:col10/1733595562967/Put/seqid=0 2024-12-07T18:19:24,784 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/B of a05999984107cee49bb0b7292dd34cbb into 2cbc9330936541c58e2437f1bc53e361(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:24,784 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:24,784 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/B, priority=12, startTime=1733595564673; duration=0sec 2024-12-07T18:19:24,784 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:24,785 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:24,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595624779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595624784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/62d73a0d36494f90874a42b7a7c2f0d5 is 50, key is test_row_0/B:col10/1733595564707/Put/seqid=0 2024-12-07T18:19:24,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741936_1112 (size=12301) 2024-12-07T18:19:24,808 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/62d73a0d36494f90874a42b7a7c2f0d5 2024-12-07T18:19:24,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741935_1111 (size=13357) 2024-12-07T18:19:24,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/9cf043dcb76c4f759f273830039b4d59 is 50, key is test_row_0/C:col10/1733595564707/Put/seqid=0 2024-12-07T18:19:24,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741937_1113 (size=12301) 2024-12-07T18:19:24,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-07T18:19:24,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595624887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:24,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595624889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,898 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:24,899 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-07T18:19:24,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:24,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:24,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:24,899 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
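For context on the pid=27 failures above: the remote FlushRegionCallable gives up because region a05999984107cee49bb0b7292dd34cbb is already mid-flush ("NOT flushing ... as already flushing"), and the master keeps re-dispatching the procedure until a later attempt succeeds. A flush like this one can be requested through the public Admin API; the following is a minimal sketch, assuming a reachable cluster, with only the table name taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
        public static void main(String[] args) throws Exception {
            // Reads hbase-site.xml from the classpath; the cluster location is an assumption.
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Requests a flush of every region of the table; in this log a request of this
                // kind shows up as FlushTableProcedure (pid=26) with a per-region
                // FlushRegionProcedure (pid=27) underneath it.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }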
2024-12-07T18:19:24,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:24,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:25,052 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-07T18:19:25,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:25,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:25,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:25,053 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:25,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:25,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:25,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595625090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595625092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595625101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595625109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,205 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-07T18:19:25,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:25,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:25,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:25,206 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
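For context on the repeated RegionTooBusyException above: HRegion.checkResources rejects a write once the region's memstore exceeds its blocking limit, which in stock HBase is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the 512.0 K figure in these entries points at a deliberately tiny test configuration. One way such a limit could be produced is sketched below; the concrete values are an assumption, not taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyMemstoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Hypothetical values: a 128 KB flush threshold with a block multiplier of 4 yields the
            // 512 KB blocking limit seen in the log. The values this test really used are not shown.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
            System.out.println("blocking memstore limit (bytes) = " + blockingLimit); // 524288
        }
    }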
2024-12-07T18:19:25,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:25,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:25,217 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/70ab781ead45467fa2171104ce3903f8 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/70ab781ead45467fa2171104ce3903f8 2024-12-07T18:19:25,225 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/C of a05999984107cee49bb0b7292dd34cbb into 70ab781ead45467fa2171104ce3903f8(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:25,225 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:25,225 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/C, priority=12, startTime=1733595564676; duration=0sec 2024-12-07T18:19:25,226 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:25,226 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:25,250 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/9cf043dcb76c4f759f273830039b4d59 2024-12-07T18:19:25,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/e9f33e75d916412cbc3849f72b4ad81e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/e9f33e75d916412cbc3849f72b4ad81e 2024-12-07T18:19:25,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/e9f33e75d916412cbc3849f72b4ad81e, entries=250, sequenceid=471, filesize=16.8 K 2024-12-07T18:19:25,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/62d73a0d36494f90874a42b7a7c2f0d5 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/62d73a0d36494f90874a42b7a7c2f0d5 2024-12-07T18:19:25,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/62d73a0d36494f90874a42b7a7c2f0d5, entries=150, sequenceid=471, filesize=12.0 K 2024-12-07T18:19:25,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/9cf043dcb76c4f759f273830039b4d59 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9cf043dcb76c4f759f273830039b4d59 2024-12-07T18:19:25,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9cf043dcb76c4f759f273830039b4d59, entries=150, sequenceid=471, filesize=12.0 K 2024-12-07T18:19:25,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for a05999984107cee49bb0b7292dd34cbb in 577ms, sequenceid=471, compaction requested=false 2024-12-07T18:19:25,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:25,359 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-07T18:19:25,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
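For context on the rejected Mutate calls that surround this flush: the writer threads keep retrying the same small puts (size: 4.7 K, rows like test_row_0) until the flush drains the memstore back under the blocking limit. Whether a RegionTooBusyException reaches application code directly or wrapped depends on the client retry settings; a hand-rolled writer that backs off explicitly might look like the sketch below, where only the table name, row key, and column family/qualifier are taken from the log and everything else is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 50;
                for (int attempt = 1; attempt <= 10; attempt++) {
                    try {
                        table.put(put);
                        return; // write went through
                    } catch (IOException busy) {
                        // Typically a RegionTooBusyException (possibly wrapped by the client's own
                        // retry machinery) while the memstore sits over its blocking limit.
                        Thread.sleep(backoffMs);
                        backoffMs = Math.min(backoffMs * 2, 2_000L);
                    }
                }
                throw new IOException("region stayed too busy after 10 attempts");
            }
        }
    }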
2024-12-07T18:19:25,361 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-07T18:19:25,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:25,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:25,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:25,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:25,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:25,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:25,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/8713f94981c04663bd97d59dfa115c8c is 50, key is test_row_0/A:col10/1733595564775/Put/seqid=0 2024-12-07T18:19:25,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741938_1114 (size=12301) 2024-12-07T18:19:25,385 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/8713f94981c04663bd97d59dfa115c8c 2024-12-07T18:19:25,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-07T18:19:25,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/24c6673674aa4adab9d364097fea5c8a is 50, key is test_row_0/B:col10/1733595564775/Put/seqid=0 2024-12-07T18:19:25,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
as already flushing 2024-12-07T18:19:25,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:25,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595625415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595625417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741939_1115 (size=12301) 2024-12-07T18:19:25,434 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/24c6673674aa4adab9d364097fea5c8a 2024-12-07T18:19:25,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/4562fa8775da4c29b20e1876b9f9417c is 50, key is test_row_0/C:col10/1733595564775/Put/seqid=0 2024-12-07T18:19:25,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741940_1116 (size=12301) 2024-12-07T18:19:25,491 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/4562fa8775da4c29b20e1876b9f9417c 2024-12-07T18:19:25,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/8713f94981c04663bd97d59dfa115c8c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8713f94981c04663bd97d59dfa115c8c 2024-12-07T18:19:25,518 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8713f94981c04663bd97d59dfa115c8c, entries=150, sequenceid=499, filesize=12.0 K 2024-12-07T18:19:25,520 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/24c6673674aa4adab9d364097fea5c8a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/24c6673674aa4adab9d364097fea5c8a 2024-12-07T18:19:25,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595625521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595625523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,528 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/24c6673674aa4adab9d364097fea5c8a, entries=150, sequenceid=499, filesize=12.0 K 2024-12-07T18:19:25,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/4562fa8775da4c29b20e1876b9f9417c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/4562fa8775da4c29b20e1876b9f9417c 2024-12-07T18:19:25,541 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/4562fa8775da4c29b20e1876b9f9417c, entries=150, sequenceid=499, filesize=12.0 K 2024-12-07T18:19:25,546 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a05999984107cee49bb0b7292dd34cbb in 185ms, sequenceid=499, compaction requested=true 2024-12-07T18:19:25,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:25,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
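For context on the entries above: the flush at sequenceid=499 finishes with "compaction requested=true", and earlier a short compaction of store C already ran to completion. The pending compaction work can be inspected, or forced, through the Admin API; a minimal sketch follows, assuming a reachable cluster, with only the table name taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionStatus {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // "compaction requested=true" only means the flush asked for a compaction check;
                // the current state can be queried through the Admin API.
                CompactionState state = admin.getCompactionState(table);
                System.out.println("compaction state: " + state);
                if (state == CompactionState.NONE) {
                    admin.majorCompact(table); // or wait for the region server to schedule one itself
                }
            }
        }
    }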
2024-12-07T18:19:25,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-07T18:19:25,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-07T18:19:25,550 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-07T18:19:25,550 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2640 sec 2024-12-07T18:19:25,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.2690 sec 2024-12-07T18:19:25,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:25,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-07T18:19:25,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:25,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:25,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:25,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:25,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:25,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:25,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/0dd7f14c829249b492a1aea7ed26b180 is 50, key is test_row_0/A:col10/1733595565413/Put/seqid=0 2024-12-07T18:19:25,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741941_1117 (size=14741) 2024-12-07T18:19:25,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/0dd7f14c829249b492a1aea7ed26b180 2024-12-07T18:19:25,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/12e5a8cad0c04f6098baae3d0b0b4ac6 is 50, key is test_row_0/B:col10/1733595565413/Put/seqid=0 2024-12-07T18:19:25,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741942_1118 
(size=12301) 2024-12-07T18:19:25,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595625781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595625781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595625886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:25,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595625886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:26,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595626089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595626090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/12e5a8cad0c04f6098baae3d0b0b4ac6 2024-12-07T18:19:26,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/d9b334cc39ee4e8c9a36e0a62c90fb62 is 50, key is test_row_0/C:col10/1733595565413/Put/seqid=0 2024-12-07T18:19:26,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741943_1119 (size=12301) 2024-12-07T18:19:26,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/d9b334cc39ee4e8c9a36e0a62c90fb62 2024-12-07T18:19:26,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/0dd7f14c829249b492a1aea7ed26b180 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0dd7f14c829249b492a1aea7ed26b180 2024-12-07T18:19:26,208 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0dd7f14c829249b492a1aea7ed26b180, entries=200, sequenceid=511, filesize=14.4 K 2024-12-07T18:19:26,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/12e5a8cad0c04f6098baae3d0b0b4ac6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/12e5a8cad0c04f6098baae3d0b0b4ac6 2024-12-07T18:19:26,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/12e5a8cad0c04f6098baae3d0b0b4ac6, entries=150, sequenceid=511, filesize=12.0 K 2024-12-07T18:19:26,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/d9b334cc39ee4e8c9a36e0a62c90fb62 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/d9b334cc39ee4e8c9a36e0a62c90fb62 2024-12-07T18:19:26,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/d9b334cc39ee4e8c9a36e0a62c90fb62, entries=150, sequenceid=511, filesize=12.0 K 2024-12-07T18:19:26,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for a05999984107cee49bb0b7292dd34cbb in 496ms, sequenceid=511, compaction requested=true 2024-12-07T18:19:26,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:26,223 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:26,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:26,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:26,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:26,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:26,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:26,224 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:26,224 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:26,225 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57580 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:26,225 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/A is initiating minor compaction (all files) 2024-12-07T18:19:26,225 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/A in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:26,226 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/f64168041d554f47962c8cefda2a844b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/e9f33e75d916412cbc3849f72b4ad81e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8713f94981c04663bd97d59dfa115c8c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0dd7f14c829249b492a1aea7ed26b180] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=56.2 K 2024-12-07T18:19:26,226 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting f64168041d554f47962c8cefda2a844b, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733595562967 2024-12-07T18:19:26,227 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9f33e75d916412cbc3849f72b4ad81e, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1733595564088 2024-12-07T18:19:26,227 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:26,227 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/B is initiating minor compaction (all files) 2024-12-07T18:19:26,227 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/B in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:26,227 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2cbc9330936541c58e2437f1bc53e361, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/62d73a0d36494f90874a42b7a7c2f0d5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/24c6673674aa4adab9d364097fea5c8a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/12e5a8cad0c04f6098baae3d0b0b4ac6] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=49.1 K 2024-12-07T18:19:26,228 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8713f94981c04663bd97d59dfa115c8c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733595564774 2024-12-07T18:19:26,228 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cbc9330936541c58e2437f1bc53e361, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733595562967 2024-12-07T18:19:26,228 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 62d73a0d36494f90874a42b7a7c2f0d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1733595564088 2024-12-07T18:19:26,228 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dd7f14c829249b492a1aea7ed26b180, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733595565406 2024-12-07T18:19:26,229 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 24c6673674aa4adab9d364097fea5c8a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733595564774 2024-12-07T18:19:26,229 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 12e5a8cad0c04f6098baae3d0b0b4ac6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733595565406 2024-12-07T18:19:26,243 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#B#compaction#105 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:26,244 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/e53a69cbd6af45608650ddb21311cafe is 50, key is test_row_0/B:col10/1733595565413/Put/seqid=0 2024-12-07T18:19:26,251 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#A#compaction#106 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:26,252 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/b6dd9c530d894f488628748b1814056b is 50, key is test_row_0/A:col10/1733595565413/Put/seqid=0 2024-12-07T18:19:26,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741945_1121 (size=13493) 2024-12-07T18:19:26,283 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/b6dd9c530d894f488628748b1814056b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b6dd9c530d894f488628748b1814056b 2024-12-07T18:19:26,290 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/A of a05999984107cee49bb0b7292dd34cbb into b6dd9c530d894f488628748b1814056b(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:26,290 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:26,290 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/A, priority=12, startTime=1733595566223; duration=0sec 2024-12-07T18:19:26,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741944_1120 (size=13493) 2024-12-07T18:19:26,290 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:26,291 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:26,291 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:26,293 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:26,293 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/C is initiating minor compaction (all files) 2024-12-07T18:19:26,293 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/C in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:26,293 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/70ab781ead45467fa2171104ce3903f8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9cf043dcb76c4f759f273830039b4d59, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/4562fa8775da4c29b20e1876b9f9417c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/d9b334cc39ee4e8c9a36e0a62c90fb62] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=49.1 K 2024-12-07T18:19:26,294 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70ab781ead45467fa2171104ce3903f8, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733595562967 2024-12-07T18:19:26,294 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cf043dcb76c4f759f273830039b4d59, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1733595564088 2024-12-07T18:19:26,295 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4562fa8775da4c29b20e1876b9f9417c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733595564774 2024-12-07T18:19:26,295 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9b334cc39ee4e8c9a36e0a62c90fb62, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733595565406 2024-12-07T18:19:26,300 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/e53a69cbd6af45608650ddb21311cafe as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e53a69cbd6af45608650ddb21311cafe 2024-12-07T18:19:26,309 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/B of a05999984107cee49bb0b7292dd34cbb into e53a69cbd6af45608650ddb21311cafe(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:26,309 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:26,309 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/B, priority=12, startTime=1733595566224; duration=0sec 2024-12-07T18:19:26,309 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:26,309 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:26,311 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#C#compaction#107 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:26,311 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/1cab62144eb34380b6e422300e7eed5b is 50, key is test_row_0/C:col10/1733595565413/Put/seqid=0 2024-12-07T18:19:26,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741946_1122 (size=13493) 2024-12-07T18:19:26,335 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/1cab62144eb34380b6e422300e7eed5b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/1cab62144eb34380b6e422300e7eed5b 2024-12-07T18:19:26,343 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/C of a05999984107cee49bb0b7292dd34cbb into 1cab62144eb34380b6e422300e7eed5b(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:26,343 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:26,343 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/C, priority=12, startTime=1733595566224; duration=0sec 2024-12-07T18:19:26,343 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:26,343 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:26,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-07T18:19:26,388 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-07T18:19:26,389 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:26,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-07T18:19:26,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:26,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-07T18:19:26,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-07T18:19:26,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:26,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:26,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:26,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:26,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:26,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:26,395 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:26,396 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:26,397 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:26,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1aa4eabce6d345c39284905429dc5e20 is 50, key is test_row_0/A:col10/1733595565778/Put/seqid=0 2024-12-07T18:19:26,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:26,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595626408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:26,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595626410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741947_1123 (size=12301) 2024-12-07T18:19:26,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-07T18:19:26,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:26,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595626510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:26,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595626512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,553 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,554 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-07T18:19:26,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:26,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:26,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:26,554 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:26,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:26,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:26,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-07T18:19:26,706 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-07T18:19:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:26,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:26,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:26,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:26,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:26,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595626715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:26,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595626716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1aa4eabce6d345c39284905429dc5e20 2024-12-07T18:19:26,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/a8198e4f975c45519d077e91b93fe2e1 is 50, key is test_row_0/B:col10/1733595565778/Put/seqid=0 2024-12-07T18:19:26,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741948_1124 (size=12301) 2024-12-07T18:19:26,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/a8198e4f975c45519d077e91b93fe2e1 2024-12-07T18:19:26,860 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:26,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-07T18:19:26,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:26,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:26,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:26,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:26,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:26,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:26,867 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/7abbbf9d44b840c2bf66d42f1ee5d2bd is 50, key is test_row_0/C:col10/1733595565778/Put/seqid=0 2024-12-07T18:19:26,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741949_1125 (size=12301) 2024-12-07T18:19:26,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-07T18:19:27,013 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-07T18:19:27,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:27,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:27,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:27,014 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:27,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:27,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:27,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:27,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595627018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:27,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595627018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:27,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52180 deadline: 1733595627111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,112 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., hostname=8a7a030b35db,45237,1733595542335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:27,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:27,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52146 deadline: 1733595627126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,129 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4160 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., hostname=8a7a030b35db,45237,1733595542335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:27,166 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-07T18:19:27,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:27,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:27,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:27,167 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:27,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:27,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:27,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/7abbbf9d44b840c2bf66d42f1ee5d2bd 2024-12-07T18:19:27,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/1aa4eabce6d345c39284905429dc5e20 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1aa4eabce6d345c39284905429dc5e20 2024-12-07T18:19:27,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1aa4eabce6d345c39284905429dc5e20, entries=150, sequenceid=540, filesize=12.0 K 2024-12-07T18:19:27,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/a8198e4f975c45519d077e91b93fe2e1 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/a8198e4f975c45519d077e91b93fe2e1 2024-12-07T18:19:27,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/a8198e4f975c45519d077e91b93fe2e1, entries=150, sequenceid=540, filesize=12.0 K 2024-12-07T18:19:27,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/7abbbf9d44b840c2bf66d42f1ee5d2bd as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7abbbf9d44b840c2bf66d42f1ee5d2bd 2024-12-07T18:19:27,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7abbbf9d44b840c2bf66d42f1ee5d2bd, entries=150, sequenceid=540, filesize=12.0 K 2024-12-07T18:19:27,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for a05999984107cee49bb0b7292dd34cbb in 921ms, sequenceid=540, compaction requested=false 2024-12-07T18:19:27,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:27,319 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=29 2024-12-07T18:19:27,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:27,320 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-07T18:19:27,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:27,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:27,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:27,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:27,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:27,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:27,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/20075169b6e14d51b8a53051a559317e is 50, key is test_row_0/A:col10/1733595566405/Put/seqid=0 2024-12-07T18:19:27,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741950_1126 (size=12301) 2024-12-07T18:19:27,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-07T18:19:27,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. as already flushing 2024-12-07T18:19:27,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:27,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:27,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595627566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:27,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595627567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:27,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595627670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:27,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595627670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,743 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=550 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/20075169b6e14d51b8a53051a559317e 2024-12-07T18:19:27,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/2732237e475e41d598fee997c1596c44 is 50, key is test_row_0/B:col10/1733595566405/Put/seqid=0 2024-12-07T18:19:27,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741951_1127 (size=12301) 2024-12-07T18:19:27,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595627873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:27,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:27,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595627874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:28,177 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=550 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/2732237e475e41d598fee997c1596c44 2024-12-07T18:19:28,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:28,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595628177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:28,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:28,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595628179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:28,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/6f486700a80b432ab180e71b013cfc3d is 50, key is test_row_0/C:col10/1733595566405/Put/seqid=0 2024-12-07T18:19:28,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741952_1128 (size=12301) 2024-12-07T18:19:28,194 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=550 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/6f486700a80b432ab180e71b013cfc3d 2024-12-07T18:19:28,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/20075169b6e14d51b8a53051a559317e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/20075169b6e14d51b8a53051a559317e 2024-12-07T18:19:28,210 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/20075169b6e14d51b8a53051a559317e, entries=150, sequenceid=550, filesize=12.0 K 2024-12-07T18:19:28,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/2732237e475e41d598fee997c1596c44 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2732237e475e41d598fee997c1596c44 2024-12-07T18:19:28,217 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 
{event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2732237e475e41d598fee997c1596c44, entries=150, sequenceid=550, filesize=12.0 K 2024-12-07T18:19:28,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/6f486700a80b432ab180e71b013cfc3d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6f486700a80b432ab180e71b013cfc3d 2024-12-07T18:19:28,224 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6f486700a80b432ab180e71b013cfc3d, entries=150, sequenceid=550, filesize=12.0 K 2024-12-07T18:19:28,225 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for a05999984107cee49bb0b7292dd34cbb in 905ms, sequenceid=550, compaction requested=true 2024-12-07T18:19:28,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:28,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:28,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-07T18:19:28,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-07T18:19:28,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-07T18:19:28,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8300 sec 2024-12-07T18:19:28,231 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.8410 sec 2024-12-07T18:19:28,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:28,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-07T18:19:28,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:28,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:28,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:28,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:28,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:28,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:28,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/97b2fe9b198c41f4a06e506c048ca085 is 50, key is test_row_0/A:col10/1733595568333/Put/seqid=0 2024-12-07T18:19:28,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741953_1129 (size=12301) 2024-12-07T18:19:28,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/97b2fe9b198c41f4a06e506c048ca085 2024-12-07T18:19:28,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:28,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595628360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:28,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/ae79578340a1482cbcc9422c248c29c5 is 50, key is test_row_0/B:col10/1733595568333/Put/seqid=0 2024-12-07T18:19:28,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741954_1130 (size=12301) 2024-12-07T18:19:28,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:28,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595628463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:28,469 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f34c0b8 to 127.0.0.1:56016 2024-12-07T18:19:28,469 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f18a09d to 127.0.0.1:56016 2024-12-07T18:19:28,469 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:28,469 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:28,472 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6ebb9f30 to 127.0.0.1:56016 2024-12-07T18:19:28,472 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:28,473 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x426bcd11 to 127.0.0.1:56016 2024-12-07T18:19:28,473 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:28,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-07T18:19:28,498 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-07T18:19:28,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:28,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595628666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:28,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:28,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52114 deadline: 1733595628682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:28,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:28,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52124 deadline: 1733595628683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:28,792 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/ae79578340a1482cbcc9422c248c29c5 2024-12-07T18:19:28,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/25dddf5e1b374e3588b98d2bf58ac1ea is 50, key is test_row_0/C:col10/1733595568333/Put/seqid=0 2024-12-07T18:19:28,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741955_1131 (size=12301) 2024-12-07T18:19:28,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:28,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52130 deadline: 1733595628969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:29,206 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/25dddf5e1b374e3588b98d2bf58ac1ea 2024-12-07T18:19:29,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/97b2fe9b198c41f4a06e506c048ca085 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/97b2fe9b198c41f4a06e506c048ca085 2024-12-07T18:19:29,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/97b2fe9b198c41f4a06e506c048ca085, entries=150, sequenceid=577, filesize=12.0 K 2024-12-07T18:19:29,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/ae79578340a1482cbcc9422c248c29c5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/ae79578340a1482cbcc9422c248c29c5 2024-12-07T18:19:29,221 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/ae79578340a1482cbcc9422c248c29c5, entries=150, sequenceid=577, filesize=12.0 K 2024-12-07T18:19:29,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/25dddf5e1b374e3588b98d2bf58ac1ea as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/25dddf5e1b374e3588b98d2bf58ac1ea 2024-12-07T18:19:29,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/25dddf5e1b374e3588b98d2bf58ac1ea, entries=150, sequenceid=577, filesize=12.0 K 2024-12-07T18:19:29,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for a05999984107cee49bb0b7292dd34cbb in 892ms, sequenceid=577, compaction requested=true 2024-12-07T18:19:29,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:29,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:29,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:29,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:29,227 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:29,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:29,227 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:29,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a05999984107cee49bb0b7292dd34cbb:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:29,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:29,229 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50396 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:29,229 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50396 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:29,229 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/B is initiating minor compaction (all files) 2024-12-07T18:19:29,229 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/A is initiating minor compaction (all files) 2024-12-07T18:19:29,229 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/B in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:29,229 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/A in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:29,229 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b6dd9c530d894f488628748b1814056b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1aa4eabce6d345c39284905429dc5e20, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/20075169b6e14d51b8a53051a559317e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/97b2fe9b198c41f4a06e506c048ca085] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=49.2 K 2024-12-07T18:19:29,229 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e53a69cbd6af45608650ddb21311cafe, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/a8198e4f975c45519d077e91b93fe2e1, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2732237e475e41d598fee997c1596c44, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/ae79578340a1482cbcc9422c248c29c5] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=49.2 K 2024-12-07T18:19:29,230 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6dd9c530d894f488628748b1814056b, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733595565406 2024-12-07T18:19:29,230 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting e53a69cbd6af45608650ddb21311cafe, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733595565406 2024-12-07T18:19:29,230 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1aa4eabce6d345c39284905429dc5e20, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1733595565773 2024-12-07T18:19:29,230 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a8198e4f975c45519d077e91b93fe2e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1733595565773 2024-12-07T18:19:29,230 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20075169b6e14d51b8a53051a559317e, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=550, earliestPutTs=1733595566405 2024-12-07T18:19:29,230 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2732237e475e41d598fee997c1596c44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=550, earliestPutTs=1733595566405 2024-12-07T18:19:29,231 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97b2fe9b198c41f4a06e506c048ca085, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1733595567557 2024-12-07T18:19:29,231 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ae79578340a1482cbcc9422c248c29c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1733595567557 2024-12-07T18:19:29,241 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#B#compaction#117 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:29,241 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#A#compaction#118 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:29,242 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/32f8e3e30ca8420e98d3a7902fe9acd3 is 50, key is test_row_0/B:col10/1733595568333/Put/seqid=0 2024-12-07T18:19:29,242 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/8be6892e7e2d413e81d75122f9d9f330 is 50, key is test_row_0/A:col10/1733595568333/Put/seqid=0 2024-12-07T18:19:29,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741956_1132 (size=13629) 2024-12-07T18:19:29,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741957_1133 (size=13629) 2024-12-07T18:19:29,472 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x66d523ff to 127.0.0.1:56016 2024-12-07T18:19:29,472 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:29,655 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/32f8e3e30ca8420e98d3a7902fe9acd3 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/32f8e3e30ca8420e98d3a7902fe9acd3 2024-12-07T18:19:29,655 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/8be6892e7e2d413e81d75122f9d9f330 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8be6892e7e2d413e81d75122f9d9f330 2024-12-07T18:19:29,660 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/B of a05999984107cee49bb0b7292dd34cbb into 32f8e3e30ca8420e98d3a7902fe9acd3(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:29,660 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/A of a05999984107cee49bb0b7292dd34cbb into 8be6892e7e2d413e81d75122f9d9f330(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:29,661 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:29,661 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:29,661 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/B, priority=12, startTime=1733595569227; duration=0sec 2024-12-07T18:19:29,661 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/A, priority=12, startTime=1733595569227; duration=0sec 2024-12-07T18:19:29,661 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:29,661 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:B 2024-12-07T18:19:29,661 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:29,661 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:A 2024-12-07T18:19:29,661 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:29,662 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50396 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:29,662 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): a05999984107cee49bb0b7292dd34cbb/C is initiating minor compaction (all files) 
2024-12-07T18:19:29,662 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a05999984107cee49bb0b7292dd34cbb/C in TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:29,662 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/1cab62144eb34380b6e422300e7eed5b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7abbbf9d44b840c2bf66d42f1ee5d2bd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6f486700a80b432ab180e71b013cfc3d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/25dddf5e1b374e3588b98d2bf58ac1ea] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp, totalSize=49.2 K 2024-12-07T18:19:29,663 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cab62144eb34380b6e422300e7eed5b, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733595565406 2024-12-07T18:19:29,663 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7abbbf9d44b840c2bf66d42f1ee5d2bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1733595565773 2024-12-07T18:19:29,663 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f486700a80b432ab180e71b013cfc3d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=550, earliestPutTs=1733595566405 2024-12-07T18:19:29,664 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 25dddf5e1b374e3588b98d2bf58ac1ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1733595567557 2024-12-07T18:19:29,671 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a05999984107cee49bb0b7292dd34cbb#C#compaction#119 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:29,672 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/206ace2d4333439bb8432968b5b870e6 is 50, key is test_row_0/C:col10/1733595568333/Put/seqid=0 2024-12-07T18:19:29,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741958_1134 (size=13629) 2024-12-07T18:19:29,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:29,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:19:29,688 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c9b2c1d to 127.0.0.1:56016 2024-12-07T18:19:29,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:29,688 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:29,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:29,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:29,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:29,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:29,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:29,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/c14d8eea347b4ece909b6f9df0c1e3db is 50, key is test_row_0/A:col10/1733595569471/Put/seqid=0 2024-12-07T18:19:29,693 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x24512372 to 127.0.0.1:56016 2024-12-07T18:19:29,693 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:29,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741959_1135 (size=12301) 2024-12-07T18:19:30,082 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/206ace2d4333439bb8432968b5b870e6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/206ace2d4333439bb8432968b5b870e6 2024-12-07T18:19:30,088 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a05999984107cee49bb0b7292dd34cbb/C of 
a05999984107cee49bb0b7292dd34cbb into 206ace2d4333439bb8432968b5b870e6(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:30,088 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:30,088 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb., storeName=a05999984107cee49bb0b7292dd34cbb/C, priority=12, startTime=1733595569227; duration=0sec 2024-12-07T18:19:30,088 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:30,088 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a05999984107cee49bb0b7292dd34cbb:C 2024-12-07T18:19:30,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=590 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/c14d8eea347b4ece909b6f9df0c1e3db 2024-12-07T18:19:30,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/0bfdc78ee5cf459d96956aa48ea18d7b is 50, key is test_row_0/B:col10/1733595569471/Put/seqid=0 2024-12-07T18:19:30,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741960_1136 (size=12301) 2024-12-07T18:19:30,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=590 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/0bfdc78ee5cf459d96956aa48ea18d7b 2024-12-07T18:19:30,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/34523b9971c74036a94532d0df71ceb4 is 50, key is test_row_0/C:col10/1733595569471/Put/seqid=0 2024-12-07T18:19:30,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741961_1137 (size=12301) 2024-12-07T18:19:30,639 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-07T18:19:30,924 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=590 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/34523b9971c74036a94532d0df71ceb4 2024-12-07T18:19:30,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/c14d8eea347b4ece909b6f9df0c1e3db as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/c14d8eea347b4ece909b6f9df0c1e3db 2024-12-07T18:19:30,933 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/c14d8eea347b4ece909b6f9df0c1e3db, entries=150, sequenceid=590, filesize=12.0 K 2024-12-07T18:19:30,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/0bfdc78ee5cf459d96956aa48ea18d7b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/0bfdc78ee5cf459d96956aa48ea18d7b 2024-12-07T18:19:30,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/0bfdc78ee5cf459d96956aa48ea18d7b, entries=150, sequenceid=590, filesize=12.0 K 2024-12-07T18:19:30,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/34523b9971c74036a94532d0df71ceb4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/34523b9971c74036a94532d0df71ceb4 2024-12-07T18:19:30,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/34523b9971c74036a94532d0df71ceb4, entries=150, sequenceid=590, filesize=12.0 K 2024-12-07T18:19:30,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=6.71 KB/6870 for a05999984107cee49bb0b7292dd34cbb in 1255ms, sequenceid=590, compaction requested=false 2024-12-07T18:19:30,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:31,123 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53623ce6 to 127.0.0.1:56016 2024-12-07T18:19:31,123 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:31,144 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d888e3e to 127.0.0.1:56016 
2024-12-07T18:19:31,145 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 122 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 79 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 120 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5080 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4970 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2266 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6798 rows 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2266 2024-12-07T18:19:31,145 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6798 rows 2024-12-07T18:19:31,145 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-07T18:19:31,145 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x02a08c5a to 127.0.0.1:56016 2024-12-07T18:19:31,145 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:31,153 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-07T18:19:31,157 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-07T18:19:31,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-07T18:19:31,164 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595571164"}]},"ts":"1733595571164"} 2024-12-07T18:19:31,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T18:19:31,165 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-07T18:19:31,168 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-07T18:19:31,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-07T18:19:31,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a05999984107cee49bb0b7292dd34cbb, UNASSIGN}] 
2024-12-07T18:19:31,174 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a05999984107cee49bb0b7292dd34cbb, UNASSIGN 2024-12-07T18:19:31,175 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=a05999984107cee49bb0b7292dd34cbb, regionState=CLOSING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:31,176 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T18:19:31,176 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; CloseRegionProcedure a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:19:31,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T18:19:31,331 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:31,333 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(124): Close a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:31,333 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T18:19:31,333 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1681): Closing a05999984107cee49bb0b7292dd34cbb, disabling compactions & flushes 2024-12-07T18:19:31,334 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:31,334 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 2024-12-07T18:19:31,334 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. after waiting 0 ms 2024-12-07T18:19:31,334 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:31,334 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(2837): Flushing a05999984107cee49bb0b7292dd34cbb 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-07T18:19:31,334 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=A 2024-12-07T18:19:31,334 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:31,334 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=B 2024-12-07T18:19:31,334 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:31,334 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a05999984107cee49bb0b7292dd34cbb, store=C 2024-12-07T18:19:31,334 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:31,338 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/0c93ea5c8275485c91fe68df8115aab0 is 50, key is test_row_0/A:col10/1733595571122/Put/seqid=0 2024-12-07T18:19:31,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741962_1138 (size=12301) 2024-12-07T18:19:31,345 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=597 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/0c93ea5c8275485c91fe68df8115aab0 2024-12-07T18:19:31,353 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/3380d123e875416895b3809b68223cb0 is 50, key is test_row_0/B:col10/1733595571122/Put/seqid=0 2024-12-07T18:19:31,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741963_1139 (size=12301) 2024-12-07T18:19:31,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T18:19:31,758 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=597 (bloomFilter=true), 
to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/3380d123e875416895b3809b68223cb0 2024-12-07T18:19:31,766 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/404f85d92efa466291313a49f282bbc1 is 50, key is test_row_0/C:col10/1733595571122/Put/seqid=0 2024-12-07T18:19:31,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T18:19:31,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741964_1140 (size=12301) 2024-12-07T18:19:31,772 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=597 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/404f85d92efa466291313a49f282bbc1 2024-12-07T18:19:31,777 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/A/0c93ea5c8275485c91fe68df8115aab0 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0c93ea5c8275485c91fe68df8115aab0 2024-12-07T18:19:31,782 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0c93ea5c8275485c91fe68df8115aab0, entries=150, sequenceid=597, filesize=12.0 K 2024-12-07T18:19:31,783 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/B/3380d123e875416895b3809b68223cb0 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/3380d123e875416895b3809b68223cb0 2024-12-07T18:19:31,788 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/3380d123e875416895b3809b68223cb0, entries=150, sequenceid=597, filesize=12.0 K 2024-12-07T18:19:31,789 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/.tmp/C/404f85d92efa466291313a49f282bbc1 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/404f85d92efa466291313a49f282bbc1 2024-12-07T18:19:31,794 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/404f85d92efa466291313a49f282bbc1, entries=150, sequenceid=597, filesize=12.0 K 2024-12-07T18:19:31,795 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for a05999984107cee49bb0b7292dd34cbb in 461ms, sequenceid=597, compaction requested=true 2024-12-07T18:19:31,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8d3c2f3253d441f19f7a1ab4aed2a24d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/17799a91678a4d7faa4380e94c747c36, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/fd0ff6e6704b46faae57e674140727ab, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b0044be1e3f247279249754cf1ff1006, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1f5661ce673b4158a064288759e8490a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/720f3d6a7b5b4c68b8d8250e301808fe, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/bbcaaf3191654cb19bb842f21754dc6e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0f5f75a21b0e446ea61b3ceb505cf04a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/c705dce6984e491f8871444bef1b2b1d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/9bda8bb1c423473daa3769c6649cac0a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1200c8d585b141db936e6dd484883757, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/5b5c51d3dec7475495e5c1e8d972fb4c, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/712db7fc3dae43028a6183c882d83239, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b7952eac8dd24a94b2b2fda2aebddf9f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/82933d5060444ff3a939c1dd36dab625, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/494d79a18b9e4f378f0107cd6ac0532e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/13b1a89a3b4641a18bdcece741674a43, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/afc87b38451a44c491beb63b0e335883, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7d023e65fe594b149a80d14d03b12402, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/23b92a40c4804e46a7de22e855bb129c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b7625019760747079d1917e1364c2af5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7b8b8eedcee04e98b321c96226deb03d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7dc1ee430bde476fb02ee7cfe0514097, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/00e7d1bc0bd24afbb9f7c5c728d29459, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/14a6f5c812374b61b081dbfab3bffe7e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1186513eed5d4919ba0c437d40909032, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/43844bf1309c44e294b8ccf15025bbdb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/64f4ad1acfbb4317a5213a45c5936139, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/245d32a5bf8c4619ba2f6f7f481b2e81, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/67b41206a3d143ecb73649825f4c569f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/2beeec1fd7914716a728d6cb0a87dfac, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/f64168041d554f47962c8cefda2a844b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/e9f33e75d916412cbc3849f72b4ad81e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8713f94981c04663bd97d59dfa115c8c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0dd7f14c829249b492a1aea7ed26b180, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b6dd9c530d894f488628748b1814056b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1aa4eabce6d345c39284905429dc5e20, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/20075169b6e14d51b8a53051a559317e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/97b2fe9b198c41f4a06e506c048ca085] to archive 2024-12-07T18:19:31,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:19:31,807 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8d3c2f3253d441f19f7a1ab4aed2a24d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8d3c2f3253d441f19f7a1ab4aed2a24d 2024-12-07T18:19:31,809 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/17799a91678a4d7faa4380e94c747c36 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/17799a91678a4d7faa4380e94c747c36 2024-12-07T18:19:31,815 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/fd0ff6e6704b46faae57e674140727ab to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/fd0ff6e6704b46faae57e674140727ab 2024-12-07T18:19:31,817 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b0044be1e3f247279249754cf1ff1006 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b0044be1e3f247279249754cf1ff1006 2024-12-07T18:19:31,819 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1f5661ce673b4158a064288759e8490a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1f5661ce673b4158a064288759e8490a 2024-12-07T18:19:31,821 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/720f3d6a7b5b4c68b8d8250e301808fe to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/720f3d6a7b5b4c68b8d8250e301808fe 2024-12-07T18:19:31,823 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/bbcaaf3191654cb19bb842f21754dc6e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/bbcaaf3191654cb19bb842f21754dc6e 2024-12-07T18:19:31,825 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0f5f75a21b0e446ea61b3ceb505cf04a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0f5f75a21b0e446ea61b3ceb505cf04a 2024-12-07T18:19:31,827 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/c705dce6984e491f8871444bef1b2b1d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/c705dce6984e491f8871444bef1b2b1d 2024-12-07T18:19:31,828 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/9bda8bb1c423473daa3769c6649cac0a to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/9bda8bb1c423473daa3769c6649cac0a 2024-12-07T18:19:31,831 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1200c8d585b141db936e6dd484883757 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1200c8d585b141db936e6dd484883757 2024-12-07T18:19:31,832 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/5b5c51d3dec7475495e5c1e8d972fb4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/5b5c51d3dec7475495e5c1e8d972fb4c 2024-12-07T18:19:31,834 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/712db7fc3dae43028a6183c882d83239 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/712db7fc3dae43028a6183c882d83239 2024-12-07T18:19:31,837 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b7952eac8dd24a94b2b2fda2aebddf9f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b7952eac8dd24a94b2b2fda2aebddf9f 2024-12-07T18:19:31,839 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/82933d5060444ff3a939c1dd36dab625 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/82933d5060444ff3a939c1dd36dab625 2024-12-07T18:19:31,841 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/494d79a18b9e4f378f0107cd6ac0532e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/494d79a18b9e4f378f0107cd6ac0532e 2024-12-07T18:19:31,842 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/13b1a89a3b4641a18bdcece741674a43 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/13b1a89a3b4641a18bdcece741674a43 2024-12-07T18:19:31,844 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/afc87b38451a44c491beb63b0e335883 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/afc87b38451a44c491beb63b0e335883 2024-12-07T18:19:31,845 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7d023e65fe594b149a80d14d03b12402 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7d023e65fe594b149a80d14d03b12402 2024-12-07T18:19:31,847 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/23b92a40c4804e46a7de22e855bb129c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/23b92a40c4804e46a7de22e855bb129c 2024-12-07T18:19:31,849 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b7625019760747079d1917e1364c2af5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b7625019760747079d1917e1364c2af5 2024-12-07T18:19:31,850 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7b8b8eedcee04e98b321c96226deb03d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7b8b8eedcee04e98b321c96226deb03d 2024-12-07T18:19:31,852 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7dc1ee430bde476fb02ee7cfe0514097 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/7dc1ee430bde476fb02ee7cfe0514097 2024-12-07T18:19:31,854 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/00e7d1bc0bd24afbb9f7c5c728d29459 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/00e7d1bc0bd24afbb9f7c5c728d29459 2024-12-07T18:19:31,855 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/14a6f5c812374b61b081dbfab3bffe7e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/14a6f5c812374b61b081dbfab3bffe7e 2024-12-07T18:19:31,857 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1186513eed5d4919ba0c437d40909032 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1186513eed5d4919ba0c437d40909032 2024-12-07T18:19:31,860 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/43844bf1309c44e294b8ccf15025bbdb to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/43844bf1309c44e294b8ccf15025bbdb 2024-12-07T18:19:31,874 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/64f4ad1acfbb4317a5213a45c5936139 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/64f4ad1acfbb4317a5213a45c5936139 2024-12-07T18:19:31,880 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/245d32a5bf8c4619ba2f6f7f481b2e81 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/245d32a5bf8c4619ba2f6f7f481b2e81 2024-12-07T18:19:31,881 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/67b41206a3d143ecb73649825f4c569f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/67b41206a3d143ecb73649825f4c569f 2024-12-07T18:19:31,883 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/2beeec1fd7914716a728d6cb0a87dfac to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/2beeec1fd7914716a728d6cb0a87dfac 2024-12-07T18:19:31,885 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/f64168041d554f47962c8cefda2a844b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/f64168041d554f47962c8cefda2a844b 2024-12-07T18:19:31,887 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/e9f33e75d916412cbc3849f72b4ad81e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/e9f33e75d916412cbc3849f72b4ad81e 2024-12-07T18:19:31,888 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8713f94981c04663bd97d59dfa115c8c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8713f94981c04663bd97d59dfa115c8c 2024-12-07T18:19:31,890 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0dd7f14c829249b492a1aea7ed26b180 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0dd7f14c829249b492a1aea7ed26b180 2024-12-07T18:19:31,892 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b6dd9c530d894f488628748b1814056b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/b6dd9c530d894f488628748b1814056b 2024-12-07T18:19:31,894 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1aa4eabce6d345c39284905429dc5e20 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/1aa4eabce6d345c39284905429dc5e20 2024-12-07T18:19:31,896 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/20075169b6e14d51b8a53051a559317e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/20075169b6e14d51b8a53051a559317e 2024-12-07T18:19:31,898 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/97b2fe9b198c41f4a06e506c048ca085 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/97b2fe9b198c41f4a06e506c048ca085 2024-12-07T18:19:31,923 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/28504d646c6748738b71ce4afa8de095, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/5a5071e9f2014e02a0503ee2ef627549, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/47df46474bda45d5b081f3c9866381c6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fa35c78b0168499489150428c1c35b94, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2267264741db46c993bce90cb862f75b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/8fcac339e65149c4aa5ea282f3512087, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77e225aa5dd845faa322463fbbb08983, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e7cdf315e85747928791fbf8748059f9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/678d63d98437435db2a578a85b9617b0, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/f3eb280a4f2f4f679de002204de122e5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/49a9c87e7afc4ddf841f74db505aef25, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/7a5966e35124418b9aaa1334a453a958, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/28c7150e91664d4b8caa21702fd6c721, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/97d0ff8c073f4dbfaa19875fa0d259e6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/6dd5e97ef7924eaa8021d248e9675381, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/785b1d28f16e483fa52a229ccf9df31b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/774bc9febdfe45418cdd4f3571a70af8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e48b70258d4141b4b4e7a9fd061ec506, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/685fc747bd7643deb2cf123d267b663a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/a157cb631cd04c3796761bebff1b4a6f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/9ea2ef2bc09844039a68048d8df39872, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/05a824210c4447b8879d73a3a7750996, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/8258ac5b1a694657bf2415b4be18d662, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/d5390288eff24da5b2dd2b08665582a1, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/1cc712f0314f45159a16d0b706995f1d, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/72fe2ae22b484cf2a4c09427c3fd3b7c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/3e1ea8563d35408da051d6962a37e1d6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77f9be276ccd42fc973971cba8818c87, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fefd404f7f104778b9baeb4de51be029, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/80fcd7fc70854452bf62799cc92d1b9c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2cbc9330936541c58e2437f1bc53e361, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2281cefccfce4807ac7e2c4427c26635, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/62d73a0d36494f90874a42b7a7c2f0d5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/24c6673674aa4adab9d364097fea5c8a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e53a69cbd6af45608650ddb21311cafe, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/12e5a8cad0c04f6098baae3d0b0b4ac6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/a8198e4f975c45519d077e91b93fe2e1, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2732237e475e41d598fee997c1596c44, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/ae79578340a1482cbcc9422c248c29c5] to archive 2024-12-07T18:19:31,925 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-07T18:19:31,927 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/28504d646c6748738b71ce4afa8de095 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/28504d646c6748738b71ce4afa8de095 2024-12-07T18:19:31,929 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/5a5071e9f2014e02a0503ee2ef627549 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/5a5071e9f2014e02a0503ee2ef627549 2024-12-07T18:19:31,930 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/47df46474bda45d5b081f3c9866381c6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/47df46474bda45d5b081f3c9866381c6 2024-12-07T18:19:31,933 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fa35c78b0168499489150428c1c35b94 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fa35c78b0168499489150428c1c35b94 2024-12-07T18:19:31,940 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2267264741db46c993bce90cb862f75b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2267264741db46c993bce90cb862f75b 2024-12-07T18:19:31,944 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/8fcac339e65149c4aa5ea282f3512087 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/8fcac339e65149c4aa5ea282f3512087 2024-12-07T18:19:31,946 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77e225aa5dd845faa322463fbbb08983 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77e225aa5dd845faa322463fbbb08983 2024-12-07T18:19:31,947 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e7cdf315e85747928791fbf8748059f9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e7cdf315e85747928791fbf8748059f9 2024-12-07T18:19:31,950 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/678d63d98437435db2a578a85b9617b0 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/678d63d98437435db2a578a85b9617b0 2024-12-07T18:19:31,955 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/f3eb280a4f2f4f679de002204de122e5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/f3eb280a4f2f4f679de002204de122e5 2024-12-07T18:19:31,960 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/49a9c87e7afc4ddf841f74db505aef25 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/49a9c87e7afc4ddf841f74db505aef25 2024-12-07T18:19:31,962 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/7a5966e35124418b9aaa1334a453a958 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/7a5966e35124418b9aaa1334a453a958 2024-12-07T18:19:31,964 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/28c7150e91664d4b8caa21702fd6c721 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/28c7150e91664d4b8caa21702fd6c721 2024-12-07T18:19:31,965 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/97d0ff8c073f4dbfaa19875fa0d259e6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/97d0ff8c073f4dbfaa19875fa0d259e6 2024-12-07T18:19:31,967 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/6dd5e97ef7924eaa8021d248e9675381 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/6dd5e97ef7924eaa8021d248e9675381 2024-12-07T18:19:31,968 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/785b1d28f16e483fa52a229ccf9df31b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/785b1d28f16e483fa52a229ccf9df31b 2024-12-07T18:19:31,970 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/774bc9febdfe45418cdd4f3571a70af8 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/774bc9febdfe45418cdd4f3571a70af8 2024-12-07T18:19:31,976 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e48b70258d4141b4b4e7a9fd061ec506 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e48b70258d4141b4b4e7a9fd061ec506 2024-12-07T18:19:31,984 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/685fc747bd7643deb2cf123d267b663a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/685fc747bd7643deb2cf123d267b663a 2024-12-07T18:19:31,986 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/a157cb631cd04c3796761bebff1b4a6f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/a157cb631cd04c3796761bebff1b4a6f 2024-12-07T18:19:31,988 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/9ea2ef2bc09844039a68048d8df39872 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/9ea2ef2bc09844039a68048d8df39872 2024-12-07T18:19:31,990 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/05a824210c4447b8879d73a3a7750996 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/05a824210c4447b8879d73a3a7750996 2024-12-07T18:19:31,994 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/8258ac5b1a694657bf2415b4be18d662 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/8258ac5b1a694657bf2415b4be18d662 2024-12-07T18:19:31,995 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/d5390288eff24da5b2dd2b08665582a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/d5390288eff24da5b2dd2b08665582a1 2024-12-07T18:19:31,997 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/1cc712f0314f45159a16d0b706995f1d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/1cc712f0314f45159a16d0b706995f1d 2024-12-07T18:19:31,998 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/72fe2ae22b484cf2a4c09427c3fd3b7c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/72fe2ae22b484cf2a4c09427c3fd3b7c 2024-12-07T18:19:31,999 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/3e1ea8563d35408da051d6962a37e1d6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/3e1ea8563d35408da051d6962a37e1d6 2024-12-07T18:19:32,000 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77f9be276ccd42fc973971cba8818c87 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/77f9be276ccd42fc973971cba8818c87 2024-12-07T18:19:32,007 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fefd404f7f104778b9baeb4de51be029 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/fefd404f7f104778b9baeb4de51be029 2024-12-07T18:19:32,008 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/80fcd7fc70854452bf62799cc92d1b9c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/80fcd7fc70854452bf62799cc92d1b9c 2024-12-07T18:19:32,010 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2cbc9330936541c58e2437f1bc53e361 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2cbc9330936541c58e2437f1bc53e361 2024-12-07T18:19:32,011 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2281cefccfce4807ac7e2c4427c26635 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2281cefccfce4807ac7e2c4427c26635 2024-12-07T18:19:32,012 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/62d73a0d36494f90874a42b7a7c2f0d5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/62d73a0d36494f90874a42b7a7c2f0d5 2024-12-07T18:19:32,014 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/24c6673674aa4adab9d364097fea5c8a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/24c6673674aa4adab9d364097fea5c8a 2024-12-07T18:19:32,015 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e53a69cbd6af45608650ddb21311cafe to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/e53a69cbd6af45608650ddb21311cafe 2024-12-07T18:19:32,017 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/12e5a8cad0c04f6098baae3d0b0b4ac6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/12e5a8cad0c04f6098baae3d0b0b4ac6 2024-12-07T18:19:32,018 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/a8198e4f975c45519d077e91b93fe2e1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/a8198e4f975c45519d077e91b93fe2e1 2024-12-07T18:19:32,019 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2732237e475e41d598fee997c1596c44 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/2732237e475e41d598fee997c1596c44 2024-12-07T18:19:32,021 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/ae79578340a1482cbcc9422c248c29c5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/ae79578340a1482cbcc9422c248c29c5 2024-12-07T18:19:32,023 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/a806839f192f41b5b719b21156ad470a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ea707277c19644acbec4f4508ded7b9e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fc6140cb58ba41e7b3bb17f9c2b88ce2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9aee3f1ebea642fe87f51cb142d011c9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/b700d8ef54714a9e9b5cb7571eec42d9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/69fcc5ee25c3444ea73fb043d9a96ca5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e96f63578e614319be07298676bf0835, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/684c0cc8a6b14c558400a055da38c3bf, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/20382f47d331483ea73c7091e96271a8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/4e5e414b3cd8436bacb8b1fb6dd3cea4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/5e85ddd0e20344458c78443630a3aec4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e3354f7298724d5699837416cd3a6938, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/3d454caad9c24b3cb15a173617f9c379, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ad7ef3989b3541d9b913afe2271536a6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/643b4938ca8643709aec264d67ccd5ae, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/306b6b5c55a04378887a57707c3e9e09, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/00e28abc7fed4c0b99803c8ff2aee4e3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e5c026805770484d963601dbaa15ebc2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fca7a027e63a412290bd2d12c8c86d1e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/c7c0d51da1264074aa540b299917fdf6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/818fbb07f43640889078adc15bd36280, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ed8e2feb7d9447f8b89e0c58ea1c76d8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fdbebd756eb7487d93ddf1d187e0c7d5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/43c2800e213e4bd09045e423e0f0bf66, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/85c2f87d63964930833a3eae202e48b5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e99a873e949e43d1aa3d039dbf1325e7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/45f6b860f13e402e97da09f42db710a6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fe114c60dd374f82bab6a31d3b5f58ca, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fdcb1a01a7014fa5bc9f9496fd2cc454, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6c3c234a69e94bbb872e054a4e67b0d9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/70ab781ead45467fa2171104ce3903f8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7f84b51b6c884b93b83acbb444bf7796, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9cf043dcb76c4f759f273830039b4d59, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/4562fa8775da4c29b20e1876b9f9417c, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/1cab62144eb34380b6e422300e7eed5b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/d9b334cc39ee4e8c9a36e0a62c90fb62, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7abbbf9d44b840c2bf66d42f1ee5d2bd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6f486700a80b432ab180e71b013cfc3d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/25dddf5e1b374e3588b98d2bf58ac1ea] to archive 2024-12-07T18:19:32,024 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:19:32,025 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/a806839f192f41b5b719b21156ad470a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/a806839f192f41b5b719b21156ad470a 2024-12-07T18:19:32,027 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ea707277c19644acbec4f4508ded7b9e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ea707277c19644acbec4f4508ded7b9e 2024-12-07T18:19:32,028 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fc6140cb58ba41e7b3bb17f9c2b88ce2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fc6140cb58ba41e7b3bb17f9c2b88ce2 2024-12-07T18:19:32,030 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9aee3f1ebea642fe87f51cb142d011c9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9aee3f1ebea642fe87f51cb142d011c9 2024-12-07T18:19:32,032 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/b700d8ef54714a9e9b5cb7571eec42d9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/b700d8ef54714a9e9b5cb7571eec42d9 2024-12-07T18:19:32,034 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/69fcc5ee25c3444ea73fb043d9a96ca5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/69fcc5ee25c3444ea73fb043d9a96ca5 2024-12-07T18:19:32,035 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e96f63578e614319be07298676bf0835 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e96f63578e614319be07298676bf0835 2024-12-07T18:19:32,037 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/684c0cc8a6b14c558400a055da38c3bf to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/684c0cc8a6b14c558400a055da38c3bf 2024-12-07T18:19:32,039 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/20382f47d331483ea73c7091e96271a8 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/20382f47d331483ea73c7091e96271a8 2024-12-07T18:19:32,040 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/4e5e414b3cd8436bacb8b1fb6dd3cea4 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/4e5e414b3cd8436bacb8b1fb6dd3cea4 2024-12-07T18:19:32,042 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/5e85ddd0e20344458c78443630a3aec4 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/5e85ddd0e20344458c78443630a3aec4 2024-12-07T18:19:32,043 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e3354f7298724d5699837416cd3a6938 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e3354f7298724d5699837416cd3a6938 2024-12-07T18:19:32,046 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/3d454caad9c24b3cb15a173617f9c379 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/3d454caad9c24b3cb15a173617f9c379 2024-12-07T18:19:32,049 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ad7ef3989b3541d9b913afe2271536a6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ad7ef3989b3541d9b913afe2271536a6 2024-12-07T18:19:32,051 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/643b4938ca8643709aec264d67ccd5ae to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/643b4938ca8643709aec264d67ccd5ae 2024-12-07T18:19:32,052 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/306b6b5c55a04378887a57707c3e9e09 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/306b6b5c55a04378887a57707c3e9e09 2024-12-07T18:19:32,054 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/00e28abc7fed4c0b99803c8ff2aee4e3 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/00e28abc7fed4c0b99803c8ff2aee4e3 2024-12-07T18:19:32,056 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e5c026805770484d963601dbaa15ebc2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e5c026805770484d963601dbaa15ebc2 2024-12-07T18:19:32,057 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fca7a027e63a412290bd2d12c8c86d1e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fca7a027e63a412290bd2d12c8c86d1e 2024-12-07T18:19:32,059 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/c7c0d51da1264074aa540b299917fdf6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/c7c0d51da1264074aa540b299917fdf6 2024-12-07T18:19:32,060 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/818fbb07f43640889078adc15bd36280 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/818fbb07f43640889078adc15bd36280 2024-12-07T18:19:32,062 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ed8e2feb7d9447f8b89e0c58ea1c76d8 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/ed8e2feb7d9447f8b89e0c58ea1c76d8 2024-12-07T18:19:32,063 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fdbebd756eb7487d93ddf1d187e0c7d5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fdbebd756eb7487d93ddf1d187e0c7d5 2024-12-07T18:19:32,068 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/43c2800e213e4bd09045e423e0f0bf66 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/43c2800e213e4bd09045e423e0f0bf66 2024-12-07T18:19:32,070 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/85c2f87d63964930833a3eae202e48b5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/85c2f87d63964930833a3eae202e48b5 2024-12-07T18:19:32,071 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e99a873e949e43d1aa3d039dbf1325e7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/e99a873e949e43d1aa3d039dbf1325e7 2024-12-07T18:19:32,074 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/45f6b860f13e402e97da09f42db710a6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/45f6b860f13e402e97da09f42db710a6 2024-12-07T18:19:32,083 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fe114c60dd374f82bab6a31d3b5f58ca to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fe114c60dd374f82bab6a31d3b5f58ca 2024-12-07T18:19:32,085 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fdcb1a01a7014fa5bc9f9496fd2cc454 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/fdcb1a01a7014fa5bc9f9496fd2cc454 2024-12-07T18:19:32,087 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6c3c234a69e94bbb872e054a4e67b0d9 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6c3c234a69e94bbb872e054a4e67b0d9 2024-12-07T18:19:32,088 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/70ab781ead45467fa2171104ce3903f8 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/70ab781ead45467fa2171104ce3903f8 2024-12-07T18:19:32,090 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7f84b51b6c884b93b83acbb444bf7796 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7f84b51b6c884b93b83acbb444bf7796 2024-12-07T18:19:32,092 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9cf043dcb76c4f759f273830039b4d59 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/9cf043dcb76c4f759f273830039b4d59 2024-12-07T18:19:32,095 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/4562fa8775da4c29b20e1876b9f9417c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/4562fa8775da4c29b20e1876b9f9417c 2024-12-07T18:19:32,096 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/1cab62144eb34380b6e422300e7eed5b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/1cab62144eb34380b6e422300e7eed5b 2024-12-07T18:19:32,098 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/d9b334cc39ee4e8c9a36e0a62c90fb62 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/d9b334cc39ee4e8c9a36e0a62c90fb62 2024-12-07T18:19:32,100 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7abbbf9d44b840c2bf66d42f1ee5d2bd to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/7abbbf9d44b840c2bf66d42f1ee5d2bd 2024-12-07T18:19:32,102 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6f486700a80b432ab180e71b013cfc3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/6f486700a80b432ab180e71b013cfc3d 2024-12-07T18:19:32,103 DEBUG [StoreCloser-TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/25dddf5e1b374e3588b98d2bf58ac1ea to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/25dddf5e1b374e3588b98d2bf58ac1ea 2024-12-07T18:19:32,110 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/recovered.edits/600.seqid, newMaxSeqId=600, maxSeqId=1 2024-12-07T18:19:32,113 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb. 
2024-12-07T18:19:32,113 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1635): Region close journal for a05999984107cee49bb0b7292dd34cbb: 2024-12-07T18:19:32,115 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(170): Closed a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:32,116 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=a05999984107cee49bb0b7292dd34cbb, regionState=CLOSED 2024-12-07T18:19:32,119 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-07T18:19:32,119 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseRegionProcedure a05999984107cee49bb0b7292dd34cbb, server=8a7a030b35db,45237,1733595542335 in 941 msec 2024-12-07T18:19:32,121 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-12-07T18:19:32,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a05999984107cee49bb0b7292dd34cbb, UNASSIGN in 946 msec 2024-12-07T18:19:32,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-07T18:19:32,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 954 msec 2024-12-07T18:19:32,126 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595572126"}]},"ts":"1733595572126"} 2024-12-07T18:19:32,127 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-07T18:19:32,130 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-07T18:19:32,132 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 972 msec 2024-12-07T18:19:32,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T18:19:32,269 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-07T18:19:32,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-07T18:19:32,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:19:32,280 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:19:32,281 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=34, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:19:32,282 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-07T18:19:32,287 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:32,292 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/recovered.edits] 2024-12-07T18:19:32,296 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0c93ea5c8275485c91fe68df8115aab0 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/0c93ea5c8275485c91fe68df8115aab0 2024-12-07T18:19:32,299 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8be6892e7e2d413e81d75122f9d9f330 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/8be6892e7e2d413e81d75122f9d9f330 2024-12-07T18:19:32,304 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/c14d8eea347b4ece909b6f9df0c1e3db to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/A/c14d8eea347b4ece909b6f9df0c1e3db 2024-12-07T18:19:32,307 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/0bfdc78ee5cf459d96956aa48ea18d7b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/0bfdc78ee5cf459d96956aa48ea18d7b 2024-12-07T18:19:32,309 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/32f8e3e30ca8420e98d3a7902fe9acd3 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/32f8e3e30ca8420e98d3a7902fe9acd3 
2024-12-07T18:19:32,310 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/3380d123e875416895b3809b68223cb0 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/B/3380d123e875416895b3809b68223cb0 2024-12-07T18:19:32,313 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/206ace2d4333439bb8432968b5b870e6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/206ace2d4333439bb8432968b5b870e6 2024-12-07T18:19:32,315 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/34523b9971c74036a94532d0df71ceb4 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/34523b9971c74036a94532d0df71ceb4 2024-12-07T18:19:32,319 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/404f85d92efa466291313a49f282bbc1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/C/404f85d92efa466291313a49f282bbc1 2024-12-07T18:19:32,328 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/recovered.edits/600.seqid to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb/recovered.edits/600.seqid 2024-12-07T18:19:32,329 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/a05999984107cee49bb0b7292dd34cbb 2024-12-07T18:19:32,329 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-07T18:19:32,336 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=34, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:19:32,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-07T18:19:32,352 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-07T18:19:32,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-07T18:19:32,404 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-07T18:19:32,406 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=34, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:19:32,406 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-07T18:19:32,406 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733595572406"}]},"ts":"9223372036854775807"} 2024-12-07T18:19:32,410 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-07T18:19:32,410 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a05999984107cee49bb0b7292dd34cbb, NAME => 'TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb.', STARTKEY => '', ENDKEY => ''}] 2024-12-07T18:19:32,410 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-07T18:19:32,410 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733595572410"}]},"ts":"9223372036854775807"} 2024-12-07T18:19:32,418 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-07T18:19:32,421 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=34, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:19:32,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 148 msec 2024-12-07T18:19:32,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-07T18:19:32,585 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-12-07T18:19:32,598 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=239 (was 219) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
hconnection-0x72e0059f-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x72e0059f-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;8a7a030b35db:45237-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x72e0059f-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-899140684_22 
at /127.0.0.1:47872 [Waiting for operation #386] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native 
Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x72e0059f-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1715476931_22 at /127.0.0.1:49436 [Waiting for operation #397] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=373 (was 216) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7679 (was 7316) - AvailableMemoryMB LEAK? - 2024-12-07T18:19:32,610 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=239, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=373, ProcessCount=11, AvailableMemoryMB=7677 2024-12-07T18:19:32,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-07T18:19:32,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T18:19:32,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-07T18:19:32,614 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T18:19:32,614 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:32,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 35 2024-12-07T18:19:32,615 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T18:19:32,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-07T18:19:32,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741965_1141 (size=963) 2024-12-07T18:19:32,624 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7 2024-12-07T18:19:32,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741966_1142 (size=53) 2024-12-07T18:19:32,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-07T18:19:32,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-07T18:19:33,037 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:19:33,037 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 4ae54766e0f6f378fecb09a332e653a1, disabling compactions & flushes 2024-12-07T18:19:33,037 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:33,037 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:33,037 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. after waiting 0 ms 2024-12-07T18:19:33,037 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:33,037 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:33,037 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:33,039 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T18:19:33,039 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733595573039"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733595573039"}]},"ts":"1733595573039"} 2024-12-07T18:19:33,044 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-07T18:19:33,045 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T18:19:33,046 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595573045"}]},"ts":"1733595573045"} 2024-12-07T18:19:33,047 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-07T18:19:33,059 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4ae54766e0f6f378fecb09a332e653a1, ASSIGN}] 2024-12-07T18:19:33,060 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4ae54766e0f6f378fecb09a332e653a1, ASSIGN 2024-12-07T18:19:33,061 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=4ae54766e0f6f378fecb09a332e653a1, ASSIGN; state=OFFLINE, location=8a7a030b35db,45237,1733595542335; forceNewPlan=false, retain=false 2024-12-07T18:19:33,211 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=4ae54766e0f6f378fecb09a332e653a1, regionState=OPENING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:33,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; OpenRegionProcedure 4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:19:33,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-07T18:19:33,367 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:33,371 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:33,371 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7285): Opening region: {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:19:33,371 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:33,371 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:19:33,372 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7327): checking encryption for 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:33,372 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7330): checking classloading for 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:33,373 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:33,374 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:19:33,375 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ae54766e0f6f378fecb09a332e653a1 columnFamilyName A 2024-12-07T18:19:33,375 DEBUG [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:33,375 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(327): Store=4ae54766e0f6f378fecb09a332e653a1/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:33,375 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:33,376 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:19:33,377 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ae54766e0f6f378fecb09a332e653a1 columnFamilyName B 2024-12-07T18:19:33,377 DEBUG [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:33,378 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(327): Store=4ae54766e0f6f378fecb09a332e653a1/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:33,378 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:33,379 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:19:33,380 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ae54766e0f6f378fecb09a332e653a1 columnFamilyName C 2024-12-07T18:19:33,380 DEBUG [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:33,380 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(327): Store=4ae54766e0f6f378fecb09a332e653a1/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:33,380 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:33,381 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:33,382 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:33,384 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T18:19:33,385 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1085): writing seq id for 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:33,387 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T18:19:33,388 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1102): Opened 4ae54766e0f6f378fecb09a332e653a1; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68156881, jitterRate=0.01561667025089264}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T18:19:33,389 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1001): Region open journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:33,389 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., pid=37, masterSystemTime=1733595573366 2024-12-07T18:19:33,391 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:33,391 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:33,392 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=4ae54766e0f6f378fecb09a332e653a1, regionState=OPEN, openSeqNum=2, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:33,395 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-07T18:19:33,395 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; OpenRegionProcedure 4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 in 179 msec 2024-12-07T18:19:33,397 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-07T18:19:33,397 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4ae54766e0f6f378fecb09a332e653a1, ASSIGN in 336 msec 2024-12-07T18:19:33,398 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T18:19:33,398 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595573398"}]},"ts":"1733595573398"} 2024-12-07T18:19:33,399 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-07T18:19:33,403 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T18:19:33,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 791 msec 2024-12-07T18:19:33,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-07T18:19:33,721 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 35 completed 2024-12-07T18:19:33,723 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x415dec94 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7fde36e8 2024-12-07T18:19:33,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f48093f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:33,733 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:33,735 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53294, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:33,738 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T18:19:33,740 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39848, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T18:19:33,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-07T18:19:33,746 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T18:19:33,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-07T18:19:33,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741967_1143 (size=999) 2024-12-07T18:19:33,767 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-07T18:19:33,767 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-07T18:19:33,771 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-07T18:19:33,780 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4ae54766e0f6f378fecb09a332e653a1, REOPEN/MOVE}] 2024-12-07T18:19:33,781 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4ae54766e0f6f378fecb09a332e653a1, REOPEN/MOVE 2024-12-07T18:19:33,782 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=4ae54766e0f6f378fecb09a332e653a1, regionState=CLOSING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:33,783 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T18:19:33,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure 4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:19:33,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:33,936 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:33,936 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T18:19:33,936 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1681): Closing 4ae54766e0f6f378fecb09a332e653a1, disabling compactions & flushes 2024-12-07T18:19:33,936 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:33,936 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:33,936 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. after waiting 0 ms 2024-12-07T18:19:33,936 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:33,940 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-07T18:19:33,941 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:33,941 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:33,941 WARN [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionServer(3786): Not adding moved region record: 4ae54766e0f6f378fecb09a332e653a1 to self. 2024-12-07T18:19:33,943 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:33,943 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=4ae54766e0f6f378fecb09a332e653a1, regionState=CLOSED 2024-12-07T18:19:33,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-12-07T18:19:33,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure 4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 in 161 msec 2024-12-07T18:19:33,946 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=4ae54766e0f6f378fecb09a332e653a1, REOPEN/MOVE; state=CLOSED, location=8a7a030b35db,45237,1733595542335; forceNewPlan=false, retain=true 2024-12-07T18:19:34,096 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=4ae54766e0f6f378fecb09a332e653a1, regionState=OPENING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,098 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE; OpenRegionProcedure 4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:19:34,250 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,253 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:34,253 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7285): Opening region: {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:19:34,254 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,254 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:19:34,254 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7327): checking encryption for 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,254 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7330): checking classloading for 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,256 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,257 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:19:34,263 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ae54766e0f6f378fecb09a332e653a1 columnFamilyName A 2024-12-07T18:19:34,264 DEBUG [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:34,265 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(327): Store=4ae54766e0f6f378fecb09a332e653a1/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:34,265 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,266 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:19:34,266 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ae54766e0f6f378fecb09a332e653a1 columnFamilyName B 2024-12-07T18:19:34,266 DEBUG [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:34,267 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(327): Store=4ae54766e0f6f378fecb09a332e653a1/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:34,267 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,267 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:19:34,267 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ae54766e0f6f378fecb09a332e653a1 columnFamilyName C 2024-12-07T18:19:34,268 DEBUG [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:34,268 INFO [StoreOpener-4ae54766e0f6f378fecb09a332e653a1-1 {}] regionserver.HStore(327): Store=4ae54766e0f6f378fecb09a332e653a1/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:19:34,268 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:34,269 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,270 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,271 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T18:19:34,272 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1085): writing seq id for 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,273 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1102): Opened 4ae54766e0f6f378fecb09a332e653a1; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68568817, jitterRate=0.021754994988441467}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T18:19:34,274 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1001): Region open journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:34,275 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., pid=42, masterSystemTime=1733595574250 2024-12-07T18:19:34,276 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:34,276 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
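The reopen just logged (pids 40-42) applies the ModifyTableProcedure pid=38 shown earlier, which turns column family A into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4') while leaving B and C unchanged. A sketch of issuing that kind of descriptor change with the HBase 2.x Admin API follows; it is illustrative only, the helper class name is invented, and an already-open Admin handle is assumed.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  // 'admin' is an open org.apache.hadoop.hbase.client.Admin handle (see the creation sketch above).
  public static void apply(Admin admin) throws IOException {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(name);
    ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
        .setMobEnabled(true)   // IS_MOB => 'true'
        .setMobThreshold(4L)   // MOB_THRESHOLD => '4': cells larger than 4 bytes go to MOB files
        .build();
    // modifyTable stores the new descriptor and triggers the ReopenTableRegionsProcedure
    // observed above as pids 39-42.
    admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build());
  }
}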
2024-12-07T18:19:34,277 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=4ae54766e0f6f378fecb09a332e653a1, regionState=OPEN, openSeqNum=5, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,279 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=40 2024-12-07T18:19:34,279 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=40, state=SUCCESS; OpenRegionProcedure 4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 in 180 msec 2024-12-07T18:19:34,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-07T18:19:34,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4ae54766e0f6f378fecb09a332e653a1, REOPEN/MOVE in 499 msec 2024-12-07T18:19:34,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-07T18:19:34,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 512 msec 2024-12-07T18:19:34,286 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 536 msec 2024-12-07T18:19:34,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-07T18:19:34,296 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a3d7b93 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@109a98f4 2024-12-07T18:19:34,307 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d3cf478, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:34,308 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ad21927 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4169e339 2024-12-07T18:19:34,312 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cd0bf5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:34,314 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40e8ce40 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50484683 2024-12-07T18:19:34,318 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77f4d875, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:34,319 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2939e0db to 
127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b64ccdf 2024-12-07T18:19:34,325 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c3b1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:34,326 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2362c8ba to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42b9a37d 2024-12-07T18:19:34,329 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@de2fcf6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:34,331 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c645fa1 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@16fb1797 2024-12-07T18:19:34,334 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c368568, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:34,336 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c317ae0 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5238815e 2024-12-07T18:19:34,343 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1aed43b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:34,345 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a0fc918 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c2b9b76 2024-12-07T18:19:34,349 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cab9ba4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:34,351 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x77b8b9d2 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37886c78 2024-12-07T18:19:34,356 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74eb796, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:19:34,361 DEBUG 
[hconnection-0x1c8dd5d7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:34,361 DEBUG [hconnection-0x764f591a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:34,361 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:34,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-07T18:19:34,363 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53298, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:34,363 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:34,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-07T18:19:34,364 DEBUG [hconnection-0x5cf276d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:34,364 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:34,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:34,365 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53308, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:34,376 DEBUG [hconnection-0x37dc72de-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:34,377 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53324, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:34,378 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53332, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:34,378 DEBUG [hconnection-0x477ee78f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:34,379 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53340, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:34,381 DEBUG [hconnection-0x5d3ff9e0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:34,384 DEBUG [hconnection-0x3f70508f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-07T18:19:34,384 DEBUG [hconnection-0x6b0ebcc6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:34,385 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53352, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:34,386 DEBUG [hconnection-0x6fe00eeb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:19:34,386 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:34,387 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53344, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:34,387 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53360, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:19:34,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-07T18:19:34,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:34,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:34,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:34,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:34,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:34,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:34,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595634448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595634451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595634451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595634454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595634454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-07T18:19:34,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207eb3b5acad0594c45a3ac0dc44c15aa41_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595574393/Put/seqid=0 2024-12-07T18:19:34,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741968_1144 (size=12154) 2024-12-07T18:19:34,488 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:34,493 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207eb3b5acad0594c45a3ac0dc44c15aa41_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207eb3b5acad0594c45a3ac0dc44c15aa41_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,494 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/9ce965c9b0aa4163984e8af9f8adad52, store: [table=TestAcidGuarantees 
family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:34,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/9ce965c9b0aa4163984e8af9f8adad52 is 175, key is test_row_0/A:col10/1733595574393/Put/seqid=0 2024-12-07T18:19:34,516 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-07T18:19:34,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:34,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:34,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:34,517 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:34,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:34,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
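The RegionTooBusyException bursts above come from HRegion.checkResources(): the table descriptor sets a memstore flush size of 131072 bytes (the TableDescriptorChecker warning earlier), and the per-region blocking limit is that flush size multiplied by hbase.hregion.memstore.block.multiplier, whose default of 4 yields exactly the "Over memstore limit=512.0 K" quoted in the traces; writes fail fast with this exception until the in-flight flush drains the memstore. The small sketch below only restates that arithmetic with the values visible in this log; the property names are real HBase settings, the numbers are not recommendations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 131072L); // 128 K, as warned about in the log
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);  // HBase default
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 131072 * 4 = 524288 bytes = 512.0 K, the limit quoted in the RegionTooBusyException messages.
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}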
2024-12-07T18:19:34,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741969_1145 (size=30955) 2024-12-07T18:19:34,534 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/9ce965c9b0aa4163984e8af9f8adad52 2024-12-07T18:19:34,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595634557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595634556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595634557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595634557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595634557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/86b3d737efab4957ba598f764e664bc7 is 50, key is test_row_0/B:col10/1733595574393/Put/seqid=0 2024-12-07T18:19:34,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741970_1146 (size=12001) 2024-12-07T18:19:34,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/86b3d737efab4957ba598f764e664bc7 2024-12-07T18:19:34,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/493c7ff6218e4223ab6ed5f45faac508 is 50, key is test_row_0/C:col10/1733595574393/Put/seqid=0 2024-12-07T18:19:34,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741971_1147 (size=12001) 2024-12-07T18:19:34,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/493c7ff6218e4223ab6ed5f45faac508 2024-12-07T18:19:34,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/9ce965c9b0aa4163984e8af9f8adad52 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/9ce965c9b0aa4163984e8af9f8adad52 2024-12-07T18:19:34,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/9ce965c9b0aa4163984e8af9f8adad52, entries=150, sequenceid=16, filesize=30.2 K 
2024-12-07T18:19:34,651 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-07T18:19:34,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/86b3d737efab4957ba598f764e664bc7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/86b3d737efab4957ba598f764e664bc7 2024-12-07T18:19:34,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-07T18:19:34,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/86b3d737efab4957ba598f764e664bc7, entries=150, sequenceid=16, filesize=11.7 K 2024-12-07T18:19:34,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/493c7ff6218e4223ab6ed5f45faac508 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/493c7ff6218e4223ab6ed5f45faac508 2024-12-07T18:19:34,670 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-07T18:19:34,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:34,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:34,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:34,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:34,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:34,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:34,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/493c7ff6218e4223ab6ed5f45faac508, entries=150, sequenceid=16, filesize=11.7 K 2024-12-07T18:19:34,684 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 4ae54766e0f6f378fecb09a332e653a1 in 284ms, sequenceid=16, compaction requested=false 2024-12-07T18:19:34,685 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-07T18:19:34,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:34,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,769 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-07T18:19:34,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:34,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:34,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:34,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:34,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:34,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:34,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595634790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412072eb0bfcda4b4499e9fc7c69355070472_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595574439/Put/seqid=0 2024-12-07T18:19:34,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741972_1148 (size=14594) 2024-12-07T18:19:34,815 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:34,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595634794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,821 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412072eb0bfcda4b4499e9fc7c69355070472_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412072eb0bfcda4b4499e9fc7c69355070472_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:34,822 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/fadf68f3039447c6a6a89cde73632520, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:34,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/fadf68f3039447c6a6a89cde73632520 is 175, key is test_row_0/A:col10/1733595574439/Put/seqid=0 2024-12-07T18:19:34,829 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-07T18:19:34,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:34,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
as already flushing 2024-12-07T18:19:34,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:34,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:34,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:34,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595634799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:34,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595634799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595634800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741973_1149 (size=39549) 2024-12-07T18:19:34,847 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/fadf68f3039447c6a6a89cde73632520 2024-12-07T18:19:34,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/8131bb9b1dca43de92196a06b93e384e is 50, key is test_row_0/B:col10/1733595574439/Put/seqid=0 2024-12-07T18:19:34,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741974_1150 (size=12001) 2024-12-07T18:19:34,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/8131bb9b1dca43de92196a06b93e384e 2024-12-07T18:19:34,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595634901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/2dc8fba10c414cc0bc3980ac48497652 is 50, key is test_row_0/C:col10/1733595574439/Put/seqid=0 2024-12-07T18:19:34,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741975_1151 (size=12001) 2024-12-07T18:19:34,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/2dc8fba10c414cc0bc3980ac48497652 2024-12-07T18:19:34,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/fadf68f3039447c6a6a89cde73632520 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fadf68f3039447c6a6a89cde73632520 2024-12-07T18:19:34,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595634918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595634934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595634937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:34,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595634938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fadf68f3039447c6a6a89cde73632520, entries=200, sequenceid=42, filesize=38.6 K 2024-12-07T18:19:34,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/8131bb9b1dca43de92196a06b93e384e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8131bb9b1dca43de92196a06b93e384e 2024-12-07T18:19:34,949 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8131bb9b1dca43de92196a06b93e384e, entries=150, sequenceid=42, filesize=11.7 K 2024-12-07T18:19:34,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/2dc8fba10c414cc0bc3980ac48497652 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2dc8fba10c414cc0bc3980ac48497652 2024-12-07T18:19:34,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2dc8fba10c414cc0bc3980ac48497652, entries=150, sequenceid=42, filesize=11.7 K 2024-12-07T18:19:34,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 4ae54766e0f6f378fecb09a332e653a1 in 188ms, sequenceid=42, compaction requested=false 2024-12-07T18:19:34,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:34,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-07T18:19:34,984 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:34,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-07T18:19:34,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:34,985 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-07T18:19:34,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:34,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:34,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:34,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:34,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:34,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:35,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120737f79bd329c0413bb25cc4d133b070d6_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595574795/Put/seqid=0 2024-12-07T18:19:35,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741976_1152 (size=12154) 2024-12-07T18:19:35,021 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T18:19:35,023 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T18:19:35,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:35,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
as already flushing 2024-12-07T18:19:35,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595635152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595635153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595635154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595635158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595635159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595635260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595635261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595635264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595635261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595635264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:35,426 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120737f79bd329c0413bb25cc4d133b070d6_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120737f79bd329c0413bb25cc4d133b070d6_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:35,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/ef6ecc99bc8847bdadf0f7ec31c1fa6c, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:35,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/ef6ecc99bc8847bdadf0f7ec31c1fa6c is 175, key is test_row_0/A:col10/1733595574795/Put/seqid=0 2024-12-07T18:19:35,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741977_1153 (size=30955) 2024-12-07T18:19:35,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-07T18:19:35,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595635464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595635465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595635467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595635467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595635467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595635772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595635772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595635773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595635773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:35,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595635773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:35,854 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/ef6ecc99bc8847bdadf0f7ec31c1fa6c 2024-12-07T18:19:35,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/6ace8aa543f346c080ea9387677bbeea is 50, key is test_row_0/B:col10/1733595574795/Put/seqid=0 2024-12-07T18:19:35,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741978_1154 (size=12001) 2024-12-07T18:19:35,893 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/6ace8aa543f346c080ea9387677bbeea 2024-12-07T18:19:35,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/73557ff8c42d46788f61c5104c46c34d is 50, key is test_row_0/C:col10/1733595574795/Put/seqid=0 2024-12-07T18:19:35,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741979_1155 (size=12001) 2024-12-07T18:19:35,922 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/73557ff8c42d46788f61c5104c46c34d 2024-12-07T18:19:35,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/ef6ecc99bc8847bdadf0f7ec31c1fa6c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/ef6ecc99bc8847bdadf0f7ec31c1fa6c 2024-12-07T18:19:35,935 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/ef6ecc99bc8847bdadf0f7ec31c1fa6c, entries=150, sequenceid=54, filesize=30.2 K 2024-12-07T18:19:35,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/6ace8aa543f346c080ea9387677bbeea as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/6ace8aa543f346c080ea9387677bbeea 2024-12-07T18:19:35,943 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/6ace8aa543f346c080ea9387677bbeea, entries=150, sequenceid=54, filesize=11.7 K 2024-12-07T18:19:35,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/73557ff8c42d46788f61c5104c46c34d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/73557ff8c42d46788f61c5104c46c34d 2024-12-07T18:19:35,950 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/73557ff8c42d46788f61c5104c46c34d, entries=150, sequenceid=54, filesize=11.7 K 2024-12-07T18:19:35,951 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 4ae54766e0f6f378fecb09a332e653a1 in 966ms, sequenceid=54, compaction requested=true 2024-12-07T18:19:35,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:35,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:35,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-07T18:19:35,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-07T18:19:35,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-07T18:19:35,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5890 sec 2024-12-07T18:19:35,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 1.5980 sec 2024-12-07T18:19:36,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:36,280 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-07T18:19:36,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:36,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:36,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:36,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:36,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:36,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:36,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120704e150f6ab0d476986690be043840528_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595575153/Put/seqid=0 2024-12-07T18:19:36,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595636292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595636294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595636300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595636300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595636303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741980_1156 (size=12154) 2024-12-07T18:19:36,361 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:36,366 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120704e150f6ab0d476986690be043840528_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120704e150f6ab0d476986690be043840528_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:36,368 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/589a052534d34b299ca515917d0f56ee, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:36,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/589a052534d34b299ca515917d0f56ee is 175, key is test_row_0/A:col10/1733595575153/Put/seqid=0 2024-12-07T18:19:36,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is 
added to blk_1073741981_1157 (size=30955) 2024-12-07T18:19:36,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595636404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595636406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595636408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595636408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595636409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-07T18:19:36,470 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-12-07T18:19:36,472 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:36,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-12-07T18:19:36,474 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:36,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-07T18:19:36,476 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:36,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:36,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-07T18:19:36,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595636608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595636612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595636613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595636614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:36,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595636614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-07T18:19:36,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:36,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:36,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:36,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:36,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:36,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:36,775 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=81, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/589a052534d34b299ca515917d0f56ee 2024-12-07T18:19:36,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-07T18:19:36,783 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-07T18:19:36,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:36,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:36,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:36,784 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:36,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:36,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/5e179bf2fde5440d8595f44f179f9e66 is 50, key is test_row_0/B:col10/1733595575153/Put/seqid=0 2024-12-07T18:19:36,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:36,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741982_1158 (size=12001) 2024-12-07T18:19:36,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/5e179bf2fde5440d8595f44f179f9e66 2024-12-07T18:19:36,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/2fc46eee48374c709b487a6c613e7342 is 50, key is test_row_0/C:col10/1733595575153/Put/seqid=0 2024-12-07T18:19:36,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741983_1159 (size=12001) 2024-12-07T18:19:36,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/2fc46eee48374c709b487a6c613e7342 2024-12-07T18:19:36,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/589a052534d34b299ca515917d0f56ee as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/589a052534d34b299ca515917d0f56ee 2024-12-07T18:19:36,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/589a052534d34b299ca515917d0f56ee, entries=150, sequenceid=81, filesize=30.2 K 2024-12-07T18:19:36,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/5e179bf2fde5440d8595f44f179f9e66 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5e179bf2fde5440d8595f44f179f9e66 2024-12-07T18:19:36,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5e179bf2fde5440d8595f44f179f9e66, entries=150, sequenceid=81, filesize=11.7 K 2024-12-07T18:19:36,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/2fc46eee48374c709b487a6c613e7342 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2fc46eee48374c709b487a6c613e7342 2024-12-07T18:19:36,861 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2fc46eee48374c709b487a6c613e7342, entries=150, sequenceid=81, filesize=11.7 K 2024-12-07T18:19:36,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 4ae54766e0f6f378fecb09a332e653a1 in 582ms, sequenceid=81, compaction requested=true 2024-12-07T18:19:36,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:36,863 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:36,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:36,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:36,863 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:36,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:36,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:36,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:36,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:36,865 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132414 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:36,865 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/A is initiating minor compaction (all files) 2024-12-07T18:19:36,865 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/A in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:36,865 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/9ce965c9b0aa4163984e8af9f8adad52, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fadf68f3039447c6a6a89cde73632520, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/ef6ecc99bc8847bdadf0f7ec31c1fa6c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/589a052534d34b299ca515917d0f56ee] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=129.3 K 2024-12-07T18:19:36,865 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:36,866 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/9ce965c9b0aa4163984e8af9f8adad52, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fadf68f3039447c6a6a89cde73632520, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/ef6ecc99bc8847bdadf0f7ec31c1fa6c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/589a052534d34b299ca515917d0f56ee] 2024-12-07T18:19:36,866 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:36,866 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/B is initiating minor compaction (all files) 2024-12-07T18:19:36,866 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/B in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:36,866 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/86b3d737efab4957ba598f764e664bc7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8131bb9b1dca43de92196a06b93e384e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/6ace8aa543f346c080ea9387677bbeea, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5e179bf2fde5440d8595f44f179f9e66] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=46.9 K 2024-12-07T18:19:36,867 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ce965c9b0aa4163984e8af9f8adad52, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733595574393 2024-12-07T18:19:36,867 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 86b3d737efab4957ba598f764e664bc7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733595574393 2024-12-07T18:19:36,868 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 8131bb9b1dca43de92196a06b93e384e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733595574439 2024-12-07T18:19:36,868 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting fadf68f3039447c6a6a89cde73632520, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733595574439 2024-12-07T18:19:36,869 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef6ecc99bc8847bdadf0f7ec31c1fa6c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733595574793 2024-12-07T18:19:36,869 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ace8aa543f346c080ea9387677bbeea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733595574793 2024-12-07T18:19:36,869 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e179bf2fde5440d8595f44f179f9e66, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733595575153 2024-12-07T18:19:36,869 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 589a052534d34b299ca515917d0f56ee, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733595575153 2024-12-07T18:19:36,893 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:36,903 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#B#compaction#139 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:36,904 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/cb3867eae18141e1afa1a6a1365d90c7 is 50, key is test_row_0/B:col10/1733595575153/Put/seqid=0 2024-12-07T18:19:36,909 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412078e5f48e3b3d8414aa626bd9712a010f9_4ae54766e0f6f378fecb09a332e653a1 store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:36,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:36,950 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:36,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-07T18:19:36,951 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412078e5f48e3b3d8414aa626bd9712a010f9_4ae54766e0f6f378fecb09a332e653a1, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:36,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:36,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:36,951 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412078e5f48e3b3d8414aa626bd9712a010f9_4ae54766e0f6f378fecb09a332e653a1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:36,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:36,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:36,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:36,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:36,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-07T18:19:36,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741984_1160 (size=12139) 2024-12-07T18:19:36,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:36,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:36,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:36,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:36,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:36,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:36,966 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/cb3867eae18141e1afa1a6a1365d90c7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/cb3867eae18141e1afa1a6a1365d90c7 2024-12-07T18:19:36,976 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/B of 
4ae54766e0f6f378fecb09a332e653a1 into cb3867eae18141e1afa1a6a1365d90c7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:36,976 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:36,976 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/B, priority=12, startTime=1733595576863; duration=0sec 2024-12-07T18:19:36,976 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:36,976 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:B 2024-12-07T18:19:36,976 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:36,979 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:36,979 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/C is initiating minor compaction (all files) 2024-12-07T18:19:36,979 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/C in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:36,979 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/493c7ff6218e4223ab6ed5f45faac508, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2dc8fba10c414cc0bc3980ac48497652, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/73557ff8c42d46788f61c5104c46c34d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2fc46eee48374c709b487a6c613e7342] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=46.9 K 2024-12-07T18:19:36,980 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 493c7ff6218e4223ab6ed5f45faac508, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733595574393 2024-12-07T18:19:36,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741985_1161 (size=4469) 2024-12-07T18:19:36,983 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2dc8fba10c414cc0bc3980ac48497652, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733595574439 2024-12-07T18:19:36,984 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 73557ff8c42d46788f61c5104c46c34d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733595574793 2024-12-07T18:19:36,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120731af08f757c045eab162f3c024351f2d_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595576924/Put/seqid=0 2024-12-07T18:19:36,987 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fc46eee48374c709b487a6c613e7342, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733595575153 2024-12-07T18:19:36,988 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#A#compaction#138 average throughput is 0.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:36,990 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/bab7b85167c245f180300dbf7f57867f is 175, key is test_row_0/A:col10/1733595575153/Put/seqid=0 2024-12-07T18:19:37,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595636999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,011 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#C#compaction#141 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:37,012 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/b68ed9fff6f64ee7a5e72daf9f40abd4 is 50, key is test_row_0/C:col10/1733595575153/Put/seqid=0 2024-12-07T18:19:37,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595637004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595637007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595637008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595637010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741987_1163 (size=31093) 2024-12-07T18:19:37,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741986_1162 (size=12154) 2024-12-07T18:19:37,042 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,048 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120731af08f757c045eab162f3c024351f2d_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120731af08f757c045eab162f3c024351f2d_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:37,049 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/fbdd0cc6894d4807a80cdf561ad95f29, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:37,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/fbdd0cc6894d4807a80cdf561ad95f29 is 175, key is test_row_0/A:col10/1733595576924/Put/seqid=0 2024-12-07T18:19:37,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741988_1164 (size=12139) 2024-12-07T18:19:37,075 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/b68ed9fff6f64ee7a5e72daf9f40abd4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/b68ed9fff6f64ee7a5e72daf9f40abd4 2024-12-07T18:19:37,078 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-07T18:19:37,083 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/C of 4ae54766e0f6f378fecb09a332e653a1 into b68ed9fff6f64ee7a5e72daf9f40abd4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:37,083 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:37,083 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/C, priority=12, startTime=1733595576863; duration=0sec 2024-12-07T18:19:37,084 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:37,084 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:C 2024-12-07T18:19:37,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741989_1165 (size=30955) 2024-12-07T18:19:37,104 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/fbdd0cc6894d4807a80cdf561ad95f29 2024-12-07T18:19:37,105 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-07T18:19:37,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:37,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:37,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:37,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:37,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:37,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:37,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595637115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595637115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595637117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595637117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595637116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/a8c064037ff245ed9fbbe614f2c37bbb is 50, key is test_row_0/B:col10/1733595576924/Put/seqid=0 2024-12-07T18:19:37,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741990_1166 (size=12001) 2024-12-07T18:19:37,173 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/a8c064037ff245ed9fbbe614f2c37bbb 2024-12-07T18:19:37,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/6dcfb782838847c59a2c1226801d5736 is 50, key is test_row_0/C:col10/1733595576924/Put/seqid=0 2024-12-07T18:19:37,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741991_1167 (size=12001) 2024-12-07T18:19:37,261 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-07T18:19:37,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:37,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:37,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:37,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:37,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:37,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:37,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595637322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595637322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595637323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595637324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595637325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,414 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-07T18:19:37,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:37,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:37,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:37,415 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:37,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:37,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:37,432 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/bab7b85167c245f180300dbf7f57867f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/bab7b85167c245f180300dbf7f57867f 2024-12-07T18:19:37,437 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/A of 4ae54766e0f6f378fecb09a332e653a1 into bab7b85167c245f180300dbf7f57867f(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:37,437 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:37,437 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/A, priority=12, startTime=1733595576863; duration=0sec 2024-12-07T18:19:37,437 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:37,437 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:A 2024-12-07T18:19:37,568 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-07T18:19:37,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:37,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:37,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:37,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:37,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:37,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:37,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-07T18:19:37,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595637626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595637628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595637628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595637629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:37,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595637629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/6dcfb782838847c59a2c1226801d5736 2024-12-07T18:19:37,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/fbdd0cc6894d4807a80cdf561ad95f29 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fbdd0cc6894d4807a80cdf561ad95f29 2024-12-07T18:19:37,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fbdd0cc6894d4807a80cdf561ad95f29, entries=150, sequenceid=95, filesize=30.2 K 2024-12-07T18:19:37,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/a8c064037ff245ed9fbbe614f2c37bbb as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/a8c064037ff245ed9fbbe614f2c37bbb 2024-12-07T18:19:37,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/a8c064037ff245ed9fbbe614f2c37bbb, entries=150, sequenceid=95, filesize=11.7 K 2024-12-07T18:19:37,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/6dcfb782838847c59a2c1226801d5736 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/6dcfb782838847c59a2c1226801d5736 2024-12-07T18:19:37,671 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T18:19:37,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/6dcfb782838847c59a2c1226801d5736, entries=150, sequenceid=95, filesize=11.7 K 2024-12-07T18:19:37,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 4ae54766e0f6f378fecb09a332e653a1 in 756ms, sequenceid=95, compaction requested=false 2024-12-07T18:19:37,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:37,723 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:37,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-07T18:19:37,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:37,724 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:19:37,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:37,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:37,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:37,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:37,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:37,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:37,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412072c64f3507b4d404d929dbdadd5737c0f_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595577008/Put/seqid=0 2024-12-07T18:19:37,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to 
blk_1073741992_1168 (size=12154) 2024-12-07T18:19:37,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,775 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412072c64f3507b4d404d929dbdadd5737c0f_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412072c64f3507b4d404d929dbdadd5737c0f_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:37,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/40dda9c55de2452482ed675acb8b9a86, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:37,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/40dda9c55de2452482ed675acb8b9a86 is 175, key is test_row_0/A:col10/1733595577008/Put/seqid=0 2024-12-07T18:19:37,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741993_1169 (size=30955) 2024-12-07T18:19:37,811 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=121, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/40dda9c55de2452482ed675acb8b9a86 2024-12-07T18:19:37,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/1e8dd39998ab450cbf2745fa9a4df690 is 50, key is test_row_0/B:col10/1733595577008/Put/seqid=0 2024-12-07T18:19:37,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741994_1170 (size=12001) 2024-12-07T18:19:37,845 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/1e8dd39998ab450cbf2745fa9a4df690 2024-12-07T18:19:37,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 
{event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/f1db28cdfecf4820be00661934868a77 is 50, key is test_row_0/C:col10/1733595577008/Put/seqid=0 2024-12-07T18:19:37,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741995_1171 (size=12001) 2024-12-07T18:19:37,885 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/f1db28cdfecf4820be00661934868a77 2024-12-07T18:19:37,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/40dda9c55de2452482ed675acb8b9a86 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/40dda9c55de2452482ed675acb8b9a86 2024-12-07T18:19:37,896 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/40dda9c55de2452482ed675acb8b9a86, entries=150, sequenceid=121, filesize=30.2 K 2024-12-07T18:19:37,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/1e8dd39998ab450cbf2745fa9a4df690 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/1e8dd39998ab450cbf2745fa9a4df690 2024-12-07T18:19:37,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,904 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/1e8dd39998ab450cbf2745fa9a4df690, entries=150, sequenceid=121, filesize=11.7 K 2024-12-07T18:19:37,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/f1db28cdfecf4820be00661934868a77 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1db28cdfecf4820be00661934868a77 2024-12-07T18:19:37,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,911 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1db28cdfecf4820be00661934868a77, entries=150, sequenceid=121, filesize=11.7 K 2024-12-07T18:19:37,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,912 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 4ae54766e0f6f378fecb09a332e653a1 in 188ms, sequenceid=121, compaction requested=true 2024-12-07T18:19:37,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:37,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:37,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-07T18:19:37,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-07T18:19:37,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,915 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-07T18:19:37,916 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4380 sec 2024-12-07T18:19:37,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.4440 sec 2024-12-07T18:19:37,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:37,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [this identical DEBUG entry repeats continuously across RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=45237) from 2024-12-07T18:19:38,050 through 2024-12-07T18:19:38,145] 2024-12-07T18:19:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:38,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:19:38,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:38,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:38,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:38,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:38,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:38,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:38,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207b386dcdcb2ae47ba938d569df52b8dc5_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595578229/Put/seqid=0 2024-12-07T18:19:38,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741996_1172 (size=12304) 2024-12-07T18:19:38,309 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:38,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595638305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595638306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595638307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595638308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,315 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207b386dcdcb2ae47ba938d569df52b8dc5_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207b386dcdcb2ae47ba938d569df52b8dc5_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:38,317 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/42f1b9cd81754c30bb1f2cc604c50c3a, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:38,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/42f1b9cd81754c30bb1f2cc604c50c3a is 175, key is test_row_0/A:col10/1733595578229/Put/seqid=0 2024-12-07T18:19:38,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595638310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741997_1173 (size=31105) 2024-12-07T18:19:38,350 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/42f1b9cd81754c30bb1f2cc604c50c3a 2024-12-07T18:19:38,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/49532613318b4eaeb20d83c7530a1b3c is 50, key is test_row_0/B:col10/1733595578229/Put/seqid=0 2024-12-07T18:19:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741998_1174 (size=12151) 2024-12-07T18:19:38,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595638412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595638414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595638415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595638415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595638421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-07T18:19:38,581 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-07T18:19:38,582 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:38,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-07T18:19:38,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-07T18:19:38,584 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:38,585 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:38,585 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:38,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595638616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595638619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595638620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595638627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595638637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-07T18:19:38,737 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-07T18:19:38,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:38,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:38,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:38,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:38,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:38,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:38,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/49532613318b4eaeb20d83c7530a1b3c 2024-12-07T18:19:38,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/5d0fe2bf9f2b4f919a5ee7f1954d24cd is 50, key is test_row_0/C:col10/1733595578229/Put/seqid=0 2024-12-07T18:19:38,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741999_1175 (size=12151) 2024-12-07T18:19:38,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-07T18:19:38,891 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-07T18:19:38,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:38,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:38,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:38,892 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:38,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:38,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595638921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595638923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595638924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595638931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:38,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:38,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595638940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,044 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-07T18:19:39,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:39,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:39,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:39,045 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:39,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:39,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:39,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-07T18:19:39,197 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-07T18:19:39,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:39,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:39,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:39,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:39,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:39,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:39,222 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/5d0fe2bf9f2b4f919a5ee7f1954d24cd 2024-12-07T18:19:39,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/42f1b9cd81754c30bb1f2cc604c50c3a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/42f1b9cd81754c30bb1f2cc604c50c3a 2024-12-07T18:19:39,235 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/42f1b9cd81754c30bb1f2cc604c50c3a, entries=150, sequenceid=133, filesize=30.4 K 2024-12-07T18:19:39,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/49532613318b4eaeb20d83c7530a1b3c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/49532613318b4eaeb20d83c7530a1b3c 2024-12-07T18:19:39,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/49532613318b4eaeb20d83c7530a1b3c, entries=150, sequenceid=133, filesize=11.9 K 2024-12-07T18:19:39,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/5d0fe2bf9f2b4f919a5ee7f1954d24cd as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5d0fe2bf9f2b4f919a5ee7f1954d24cd 2024-12-07T18:19:39,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5d0fe2bf9f2b4f919a5ee7f1954d24cd, entries=150, sequenceid=133, filesize=11.9 K 2024-12-07T18:19:39,247 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 4ae54766e0f6f378fecb09a332e653a1 in 1019ms, sequenceid=133, compaction requested=true 2024-12-07T18:19:39,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:39,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:39,247 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 
0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:39,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:39,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:39,247 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:39,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:39,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:39,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:39,264 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124108 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:39,264 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/A is initiating minor compaction (all files) 2024-12-07T18:19:39,264 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/A in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:39,264 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/bab7b85167c245f180300dbf7f57867f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fbdd0cc6894d4807a80cdf561ad95f29, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/40dda9c55de2452482ed675acb8b9a86, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/42f1b9cd81754c30bb1f2cc604c50c3a] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=121.2 K 2024-12-07T18:19:39,264 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:39,264 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/bab7b85167c245f180300dbf7f57867f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fbdd0cc6894d4807a80cdf561ad95f29, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/40dda9c55de2452482ed675acb8b9a86, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/42f1b9cd81754c30bb1f2cc604c50c3a] 2024-12-07T18:19:39,265 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48292 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:39,265 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/B is initiating minor compaction (all files) 2024-12-07T18:19:39,265 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/B in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:39,265 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/cb3867eae18141e1afa1a6a1365d90c7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/a8c064037ff245ed9fbbe614f2c37bbb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/1e8dd39998ab450cbf2745fa9a4df690, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/49532613318b4eaeb20d83c7530a1b3c] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=47.2 K 2024-12-07T18:19:39,266 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting cb3867eae18141e1afa1a6a1365d90c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733595575153 2024-12-07T18:19:39,267 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting bab7b85167c245f180300dbf7f57867f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733595575153 2024-12-07T18:19:39,267 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a8c064037ff245ed9fbbe614f2c37bbb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733595576924 2024-12-07T18:19:39,268 DEBUG 
[RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting fbdd0cc6894d4807a80cdf561ad95f29, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733595576924 2024-12-07T18:19:39,268 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e8dd39998ab450cbf2745fa9a4df690, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1733595576994 2024-12-07T18:19:39,268 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40dda9c55de2452482ed675acb8b9a86, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1733595576994 2024-12-07T18:19:39,269 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 49532613318b4eaeb20d83c7530a1b3c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733595578226 2024-12-07T18:19:39,269 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42f1b9cd81754c30bb1f2cc604c50c3a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733595578226 2024-12-07T18:19:39,294 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:39,295 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#B#compaction#150 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:39,295 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/b936f595824b4da086a37a4781c39a02 is 50, key is test_row_0/B:col10/1733595578229/Put/seqid=0 2024-12-07T18:19:39,304 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412076e8cce5106f14887b833e655fafe8815_4ae54766e0f6f378fecb09a332e653a1 store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:39,307 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412076e8cce5106f14887b833e655fafe8815_4ae54766e0f6f378fecb09a332e653a1, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:39,308 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412076e8cce5106f14887b833e655fafe8815_4ae54766e0f6f378fecb09a332e653a1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:39,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742000_1176 (size=12425) 2024-12-07T18:19:39,342 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/b936f595824b4da086a37a4781c39a02 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/b936f595824b4da086a37a4781c39a02 2024-12-07T18:19:39,349 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/B of 4ae54766e0f6f378fecb09a332e653a1 into b936f595824b4da086a37a4781c39a02(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:39,349 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:39,350 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/B, priority=12, startTime=1733595579247; duration=0sec 2024-12-07T18:19:39,350 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:39,350 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:B 2024-12-07T18:19:39,350 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:39,350 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-07T18:19:39,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:39,352 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-07T18:19:39,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:39,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:39,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:39,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:39,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:39,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:39,354 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48292 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:39,354 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742001_1177 (size=4469) 2024-12-07T18:19:39,354 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/C is initiating minor compaction (all files) 2024-12-07T18:19:39,354 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/C in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:39,355 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/b68ed9fff6f64ee7a5e72daf9f40abd4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/6dcfb782838847c59a2c1226801d5736, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1db28cdfecf4820be00661934868a77, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5d0fe2bf9f2b4f919a5ee7f1954d24cd] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=47.2 K 2024-12-07T18:19:39,356 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting b68ed9fff6f64ee7a5e72daf9f40abd4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733595575153 2024-12-07T18:19:39,356 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#A#compaction#151 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:39,357 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/c15925fe7ff049c2870428d99f929a25 is 175, key is test_row_0/A:col10/1733595578229/Put/seqid=0 2024-12-07T18:19:39,359 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 6dcfb782838847c59a2c1226801d5736, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733595576924 2024-12-07T18:19:39,360 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting f1db28cdfecf4820be00661934868a77, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1733595576994 2024-12-07T18:19:39,361 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d0fe2bf9f2b4f919a5ee7f1954d24cd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733595578226 2024-12-07T18:19:39,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207c7412a8622564d32afba1eed0fbcdbe5_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595578308/Put/seqid=0 2024-12-07T18:19:39,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742002_1178 (size=31379) 2024-12-07T18:19:39,405 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/c15925fe7ff049c2870428d99f929a25 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c15925fe7ff049c2870428d99f929a25 2024-12-07T18:19:39,409 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#C#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:39,410 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/a12e05be9f0c4fb9a9aea4d855994cfb is 50, key is test_row_0/C:col10/1733595578229/Put/seqid=0 2024-12-07T18:19:39,415 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/A of 4ae54766e0f6f378fecb09a332e653a1 into c15925fe7ff049c2870428d99f929a25(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:39,415 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:39,415 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/A, priority=12, startTime=1733595579247; duration=0sec 2024-12-07T18:19:39,415 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:39,415 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:A 2024-12-07T18:19:39,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:39,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:39,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595639439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595639440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742003_1179 (size=12304) 2024-12-07T18:19:39,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:39,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595639443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595639443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595639444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,454 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207c7412a8622564d32afba1eed0fbcdbe5_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207c7412a8622564d32afba1eed0fbcdbe5_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:39,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/74dd3af267e947f890c51b49b330f2f9, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:39,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/74dd3af267e947f890c51b49b330f2f9 is 175, key is test_row_0/A:col10/1733595578308/Put/seqid=0 2024-12-07T18:19:39,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742004_1180 (size=12425) 2024-12-07T18:19:39,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742005_1181 (size=31105) 2024-12-07T18:19:39,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595639544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595639545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595639552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595639551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-07T18:19:39,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595639748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595639749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595639755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:39,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595639756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:39,875 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/a12e05be9f0c4fb9a9aea4d855994cfb as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/a12e05be9f0c4fb9a9aea4d855994cfb 2024-12-07T18:19:39,881 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/74dd3af267e947f890c51b49b330f2f9 2024-12-07T18:19:39,884 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/C of 4ae54766e0f6f378fecb09a332e653a1 into a12e05be9f0c4fb9a9aea4d855994cfb(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
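
Note on the repeated RegionTooBusyException warnings above: they come from HRegion.checkResources(), which rejects new mutations once the region's memstore passes its blocking threshold (the configured flush size times hbase.hregion.memstore.block.multiplier; the 512.0 K limit here implies a deliberately small flush size in this test, though the exact settings are not shown in this excerpt). The exception is retryable and the stock client normally retries it internally; the sketch below is only an illustration of an explicit outer backoff loop, assuming the standard HBase 2.x client API and hypothetical retry parameters, not code taken from this test. Depending on client retry settings, the busy-region error may surface directly or wrapped in RetriesExhaustedException, so both are handled.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int maxAttempts = 5;    // hypothetical retry budget
      long backoffMs = 100L;  // hypothetical initial backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);     // rejected while the region memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException | RetriesExhaustedException e) {
          if (attempt >= maxAttempts) {
            throw e;          // retry budget exhausted; surface the failure
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;     // simple exponential backoff before retrying the put
        }
      }
    }
  }
}
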
2024-12-07T18:19:39,884 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:39,884 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/C, priority=12, startTime=1733595579247; duration=0sec 2024-12-07T18:19:39,884 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:39,884 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:C 2024-12-07T18:19:39,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/4a48a6202778478bb093390c6918868e is 50, key is test_row_0/B:col10/1733595578308/Put/seqid=0 2024-12-07T18:19:39,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742006_1182 (size=12151) 2024-12-07T18:19:39,926 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/4a48a6202778478bb093390c6918868e 2024-12-07T18:19:39,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/14fd06169b554e0591ca17f1c40943ee is 50, key is test_row_0/C:col10/1733595578308/Put/seqid=0 2024-12-07T18:19:39,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742007_1183 (size=12151) 2024-12-07T18:19:39,972 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/14fd06169b554e0591ca17f1c40943ee 2024-12-07T18:19:39,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/74dd3af267e947f890c51b49b330f2f9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/74dd3af267e947f890c51b49b330f2f9 2024-12-07T18:19:39,986 INFO 
[RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/74dd3af267e947f890c51b49b330f2f9, entries=150, sequenceid=158, filesize=30.4 K 2024-12-07T18:19:39,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/4a48a6202778478bb093390c6918868e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4a48a6202778478bb093390c6918868e 2024-12-07T18:19:40,002 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4a48a6202778478bb093390c6918868e, entries=150, sequenceid=158, filesize=11.9 K 2024-12-07T18:19:40,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/14fd06169b554e0591ca17f1c40943ee as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/14fd06169b554e0591ca17f1c40943ee 2024-12-07T18:19:40,014 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/14fd06169b554e0591ca17f1c40943ee, entries=150, sequenceid=158, filesize=11.9 K 2024-12-07T18:19:40,015 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 4ae54766e0f6f378fecb09a332e653a1 in 664ms, sequenceid=158, compaction requested=false 2024-12-07T18:19:40,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:40,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
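
Note on the flush sequence above: each store first writes a temporary HFile under .tmp/, the file is then committed into the A, B, or C store directory, and the region reports the finished flush (~140.89 KB in 664 ms at sequenceid=158, compaction requested=false). The pid=48/pid=47 entries that follow are the FlushRegionProcedure and FlushTableProcedure completing on the master. A table flush like this can be requested through the Admin API; the sketch below is a minimal illustration assuming the standard HBase 2.x client, and whether this particular test triggers the flush this way is not shown in the excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on the region server this
      // surfaces as FlushRegionProcedure work and the "Finished flush of dataSize ..." entries.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
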
2024-12-07T18:19:40,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-07T18:19:40,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-07T18:19:40,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-07T18:19:40,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4360 sec 2024-12-07T18:19:40,024 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.4410 sec 2024-12-07T18:19:40,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:40,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-07T18:19:40,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:40,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:40,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:40,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:40,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:40,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:40,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412073b513c33c0d14b0a8cd53d67a64bf551_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595580054/Put/seqid=0 2024-12-07T18:19:40,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742008_1184 (size=12304) 2024-12-07T18:19:40,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595640109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595640111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595640110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595640112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,118 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:40,123 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412073b513c33c0d14b0a8cd53d67a64bf551_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412073b513c33c0d14b0a8cd53d67a64bf551_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:40,124 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/d9b9280c387b495f8e1f922530c2929c, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:40,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/d9b9280c387b495f8e1f922530c2929c is 175, key is test_row_0/A:col10/1733595580054/Put/seqid=0 2024-12-07T18:19:40,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742009_1185 (size=31105) 2024-12-07T18:19:40,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595640215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595640217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595640217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595640217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595640420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595640423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595640423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595640423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595640457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,544 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/d9b9280c387b495f8e1f922530c2929c 2024-12-07T18:19:40,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/ac86bddc4f8d443d940c6fbd95f8b5e9 is 50, key is test_row_0/B:col10/1733595580054/Put/seqid=0 2024-12-07T18:19:40,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742010_1186 (size=12151) 2024-12-07T18:19:40,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/ac86bddc4f8d443d940c6fbd95f8b5e9 2024-12-07T18:19:40,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/35b626b584dc4ac8a1fb6fb1c285e106 is 50, key is test_row_0/C:col10/1733595580054/Put/seqid=0 2024-12-07T18:19:40,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742011_1187 (size=12151) 2024-12-07T18:19:40,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/35b626b584dc4ac8a1fb6fb1c285e106 2024-12-07T18:19:40,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/d9b9280c387b495f8e1f922530c2929c as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/d9b9280c387b495f8e1f922530c2929c 2024-12-07T18:19:40,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/d9b9280c387b495f8e1f922530c2929c, entries=150, sequenceid=175, filesize=30.4 K 2024-12-07T18:19:40,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/ac86bddc4f8d443d940c6fbd95f8b5e9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ac86bddc4f8d443d940c6fbd95f8b5e9 2024-12-07T18:19:40,672 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ac86bddc4f8d443d940c6fbd95f8b5e9, entries=150, sequenceid=175, filesize=11.9 K 2024-12-07T18:19:40,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/35b626b584dc4ac8a1fb6fb1c285e106 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/35b626b584dc4ac8a1fb6fb1c285e106 2024-12-07T18:19:40,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/35b626b584dc4ac8a1fb6fb1c285e106, entries=150, sequenceid=175, filesize=11.9 K 2024-12-07T18:19:40,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 4ae54766e0f6f378fecb09a332e653a1 in 626ms, sequenceid=175, compaction requested=true 2024-12-07T18:19:40,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:40,682 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:40,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:40,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:40,683 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:40,684 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93589 starting 
at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:40,684 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/A is initiating minor compaction (all files) 2024-12-07T18:19:40,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:40,684 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/A in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:40,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:40,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:40,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:40,684 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c15925fe7ff049c2870428d99f929a25, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/74dd3af267e947f890c51b49b330f2f9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/d9b9280c387b495f8e1f922530c2929c] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=91.4 K 2024-12-07T18:19:40,685 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:40,685 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c15925fe7ff049c2870428d99f929a25, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/74dd3af267e947f890c51b49b330f2f9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/d9b9280c387b495f8e1f922530c2929c] 2024-12-07T18:19:40,685 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36727 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:40,685 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/B is initiating minor compaction (all files) 2024-12-07T18:19:40,685 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/B in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:40,686 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/b936f595824b4da086a37a4781c39a02, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4a48a6202778478bb093390c6918868e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ac86bddc4f8d443d940c6fbd95f8b5e9] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=35.9 K 2024-12-07T18:19:40,686 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting c15925fe7ff049c2870428d99f929a25, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733595578226 2024-12-07T18:19:40,687 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting b936f595824b4da086a37a4781c39a02, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733595578226 2024-12-07T18:19:40,687 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74dd3af267e947f890c51b49b330f2f9, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733595578302 2024-12-07T18:19:40,687 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a48a6202778478bb093390c6918868e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733595578302 2024-12-07T18:19:40,687 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9b9280c387b495f8e1f922530c2929c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733595579441 2024-12-07T18:19:40,688 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 
ac86bddc4f8d443d940c6fbd95f8b5e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733595579441 2024-12-07T18:19:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-07T18:19:40,689 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-07T18:19:40,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:40,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-07T18:19:40,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T18:19:40,693 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:40,694 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:40,694 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:40,700 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:40,710 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#B#compaction#160 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:40,711 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/056572ae79fb4b8a9f9993afd77d8e8c is 50, key is test_row_0/B:col10/1733595580054/Put/seqid=0 2024-12-07T18:19:40,719 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412077802117cf6984761943837dc3b48627f_4ae54766e0f6f378fecb09a332e653a1 store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:40,721 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412077802117cf6984761943837dc3b48627f_4ae54766e0f6f378fecb09a332e653a1, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:40,722 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412077802117cf6984761943837dc3b48627f_4ae54766e0f6f378fecb09a332e653a1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:40,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:40,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:19:40,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:40,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:40,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:40,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:40,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:40,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:40,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742012_1188 (size=12527) 2024-12-07T18:19:40,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595640742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595640746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595640748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595640748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207732a671b72cd4d25b6cd30e747cd1095_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595580108/Put/seqid=0 2024-12-07T18:19:40,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742013_1189 (size=4469) 2024-12-07T18:19:40,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742014_1190 (size=14794) 2024-12-07T18:19:40,770 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:40,777 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207732a671b72cd4d25b6cd30e747cd1095_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207732a671b72cd4d25b6cd30e747cd1095_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:40,778 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/276eb85ea44949c2a519695f98858131, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:40,779 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/276eb85ea44949c2a519695f98858131 is 175, key is test_row_0/A:col10/1733595580108/Put/seqid=0 2024-12-07T18:19:40,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T18:19:40,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742015_1191 
(size=39749) 2024-12-07T18:19:40,804 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/276eb85ea44949c2a519695f98858131 2024-12-07T18:19:40,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/0b96654623554e5085a2749c90f49893 is 50, key is test_row_0/B:col10/1733595580108/Put/seqid=0 2024-12-07T18:19:40,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742016_1192 (size=12151) 2024-12-07T18:19:40,822 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/0b96654623554e5085a2749c90f49893 2024-12-07T18:19:40,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/7c523bec2cd2429ba581149ed3c6e7b7 is 50, key is test_row_0/C:col10/1733595580108/Put/seqid=0 2024-12-07T18:19:40,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742017_1193 (size=12151) 2024-12-07T18:19:40,845 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-07T18:19:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:40,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:40,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:40,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595640849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595640850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595640853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:40,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595640853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T18:19:40,998 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:40,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-07T18:19:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:40,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:41,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:41,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:41,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595641053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595641054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595641057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595641058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,148 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/056572ae79fb4b8a9f9993afd77d8e8c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/056572ae79fb4b8a9f9993afd77d8e8c 2024-12-07T18:19:41,151 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-07T18:19:41,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:41,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:41,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:41,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:41,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:41,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:41,156 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/B of 4ae54766e0f6f378fecb09a332e653a1 into 056572ae79fb4b8a9f9993afd77d8e8c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
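The burst of RegionTooBusyException warnings above comes from HRegion.checkResources rejecting Mutate calls while the region's memstore sits above its blocking limit (reported here as 512.0 K, which reflects the small flush size this test run is configured with). As a rough illustration only, and not part of this run, the sketch below names the two server-side settings that define that limit and shows a client write that backs off explicitly when the exception surfaces; the table, row, values, and retry timing are placeholders, and the stock HBase client already retries these failures internally.

```java
// Hedged sketch, separate from the log above. Names the settings behind the
// "Over memstore limit" rejections (HRegion.checkResources blocks updates once
// the memstore exceeds roughly flush.size * block.multiplier) and shows one
// explicit backoff on RegionTooBusyException. All values are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackoffExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Region-server settings; normally set in hbase-site.xml on the servers,
        // shown here only to name the keys. The test above uses a much smaller
        // flush size, hence its 512.0 K blocking limit.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);   // may be rejected while the region is flushing
                    break;
                } catch (RegionTooBusyException e) {
                    // Region over its memstore limit; wait for the flush to drain it.
                    // (The client's own retry logic usually handles this already.)
                    Thread.sleep(200L * (attempt + 1));
                }
            }
        }
    }
}
```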
2024-12-07T18:19:41,156 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:41,156 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/B, priority=13, startTime=1733595580683; duration=0sec 2024-12-07T18:19:41,156 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:41,156 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:B 2024-12-07T18:19:41,156 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:41,157 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36727 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:41,158 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/C is initiating minor compaction (all files) 2024-12-07T18:19:41,158 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/C in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:41,158 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/a12e05be9f0c4fb9a9aea4d855994cfb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/14fd06169b554e0591ca17f1c40943ee, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/35b626b584dc4ac8a1fb6fb1c285e106] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=35.9 K 2024-12-07T18:19:41,159 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a12e05be9f0c4fb9a9aea4d855994cfb, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733595578226 2024-12-07T18:19:41,159 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 14fd06169b554e0591ca17f1c40943ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733595578302 2024-12-07T18:19:41,159 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 35b626b584dc4ac8a1fb6fb1c285e106, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733595579441 2024-12-07T18:19:41,166 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
4ae54766e0f6f378fecb09a332e653a1#A#compaction#159 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:41,167 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/0e6d756b419a45478e1088d52cc44c50 is 175, key is test_row_0/A:col10/1733595580054/Put/seqid=0 2024-12-07T18:19:41,174 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#C#compaction#164 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:41,175 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/e733b83fae77492db0f72773e1139a56 is 50, key is test_row_0/C:col10/1733595580054/Put/seqid=0 2024-12-07T18:19:41,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742018_1194 (size=31481) 2024-12-07T18:19:41,197 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/0e6d756b419a45478e1088d52cc44c50 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/0e6d756b419a45478e1088d52cc44c50 2024-12-07T18:19:41,204 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/A of 4ae54766e0f6f378fecb09a332e653a1 into 0e6d756b419a45478e1088d52cc44c50(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:41,204 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:41,204 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/A, priority=13, startTime=1733595580682; duration=0sec 2024-12-07T18:19:41,204 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:41,204 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:A 2024-12-07T18:19:41,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742019_1195 (size=12527) 2024-12-07T18:19:41,212 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/e733b83fae77492db0f72773e1139a56 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/e733b83fae77492db0f72773e1139a56 2024-12-07T18:19:41,219 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/C of 4ae54766e0f6f378fecb09a332e653a1 into e733b83fae77492db0f72773e1139a56(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
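The compaction selections logged just above ("Selecting compaction from 3 store files ... 3 eligible, 16 blocking", then minor compactions of the A, B, and C stores) are driven by per-store file counts and a handful of settings. The sketch below, which is not part of this run, names the keys with default-style values purely for orientation and shows how a compaction can also be requested explicitly through the Admin API; real tuning would live in hbase-site.xml on the region servers.

```java
// Hedged sketch: the "3 eligible, 16 blocking" figures in the log correspond to
// store-file counts measured against settings like these. Values are defaults,
// shown only to name the keys, not taken from this test run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionTuningExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);       // min files before a minor compaction
        conf.setInt("hbase.hstore.compaction.max", 10);      // max files folded into one compaction
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // the "16 blocking" figure in the log

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask for a major compaction of the table; the request is queued and the
            // region server schedules it much like the CompactSplit runs logged above.
            admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```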
2024-12-07T18:19:41,219 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:41,219 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/C, priority=13, startTime=1733595580684; duration=0sec 2024-12-07T18:19:41,219 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:41,219 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:C 2024-12-07T18:19:41,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/7c523bec2cd2429ba581149ed3c6e7b7 2024-12-07T18:19:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/276eb85ea44949c2a519695f98858131 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/276eb85ea44949c2a519695f98858131 2024-12-07T18:19:41,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/276eb85ea44949c2a519695f98858131, entries=200, sequenceid=198, filesize=38.8 K 2024-12-07T18:19:41,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/0b96654623554e5085a2749c90f49893 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0b96654623554e5085a2749c90f49893 2024-12-07T18:19:41,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0b96654623554e5085a2749c90f49893, entries=150, sequenceid=198, filesize=11.9 K 2024-12-07T18:19:41,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/7c523bec2cd2429ba581149ed3c6e7b7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7c523bec2cd2429ba581149ed3c6e7b7 2024-12-07T18:19:41,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7c523bec2cd2429ba581149ed3c6e7b7, entries=150, sequenceid=198, filesize=11.9 K 2024-12-07T18:19:41,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 4ae54766e0f6f378fecb09a332e653a1 in 543ms, sequenceid=198, compaction requested=false 2024-12-07T18:19:41,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:41,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T18:19:41,305 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-07T18:19:41,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:41,306 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-07T18:19:41,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:41,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:41,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:41,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:41,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:41,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:41,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207215ce0406f3e4d12b024298d0a43789a_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595580744/Put/seqid=0 2024-12-07T18:19:41,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to 
blk_1073742020_1196 (size=12304) 2024-12-07T18:19:41,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:41,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:41,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595641377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595641377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595641378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595641382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595641483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595641483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595641484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595641486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595641688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595641688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595641689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:41,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595641691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:41,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:41,736 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207215ce0406f3e4d12b024298d0a43789a_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207215ce0406f3e4d12b024298d0a43789a_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:41,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4b1c15e62e3842f9a95b96c1dee95d26, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:41,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4b1c15e62e3842f9a95b96c1dee95d26 is 175, key is test_row_0/A:col10/1733595580744/Put/seqid=0 2024-12-07T18:19:41,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742021_1197 (size=31105) 2024-12-07T18:19:41,743 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4b1c15e62e3842f9a95b96c1dee95d26 2024-12-07T18:19:41,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/5d4fccb1cb9b41058029e06f701aa631 is 50, key is test_row_0/B:col10/1733595580744/Put/seqid=0 2024-12-07T18:19:41,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742022_1198 (size=12151) 2024-12-07T18:19:41,757 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/5d4fccb1cb9b41058029e06f701aa631 2024-12-07T18:19:41,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/441e8422c4654ae9be5a161f8fa6a64d is 50, key is test_row_0/C:col10/1733595580744/Put/seqid=0 2024-12-07T18:19:41,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742023_1199 (size=12151) 2024-12-07T18:19:41,794 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/441e8422c4654ae9be5a161f8fa6a64d 2024-12-07T18:19:41,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T18:19:41,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4b1c15e62e3842f9a95b96c1dee95d26 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4b1c15e62e3842f9a95b96c1dee95d26 2024-12-07T18:19:41,806 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4b1c15e62e3842f9a95b96c1dee95d26, entries=150, sequenceid=215, filesize=30.4 K 2024-12-07T18:19:41,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/5d4fccb1cb9b41058029e06f701aa631 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5d4fccb1cb9b41058029e06f701aa631 2024-12-07T18:19:41,812 INFO 
[RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5d4fccb1cb9b41058029e06f701aa631, entries=150, sequenceid=215, filesize=11.9 K 2024-12-07T18:19:41,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/441e8422c4654ae9be5a161f8fa6a64d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/441e8422c4654ae9be5a161f8fa6a64d 2024-12-07T18:19:41,819 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/441e8422c4654ae9be5a161f8fa6a64d, entries=150, sequenceid=215, filesize=11.9 K 2024-12-07T18:19:41,820 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 4ae54766e0f6f378fecb09a332e653a1 in 515ms, sequenceid=215, compaction requested=true 2024-12-07T18:19:41,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:41,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
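The flush that completes here (pid=50 under parent pid=49, finishing as FlushRegionProcedure and FlushTableProcedure in the entries that follow) is the kind of table flush a client can request through the Admin API. The sketch below is a minimal illustration under that assumption, not code from this test; the table name is reused only for context.

```java
// Hedged sketch (not from this run): one way to drive a table flush like the
// FlushTableProcedure / FlushRegionProcedure pair completing in the log above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Requests a flush of every region of the table; the master coordinates
            // the work, which the region server logs as RS_FLUSH_REGIONS events.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```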
2024-12-07T18:19:41,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-07T18:19:41,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-07T18:19:41,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-07T18:19:41,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1280 sec 2024-12-07T18:19:41,825 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.1330 sec 2024-12-07T18:19:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:41,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:19:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:42,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595642003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595642005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595642005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595642005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207baee67034f2d41b6bcda6c2059659c9a_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595581993/Put/seqid=0 2024-12-07T18:19:42,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742024_1200 (size=19774) 2024-12-07T18:19:42,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595642107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595642107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595642107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595642107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595642309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595642309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595642310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595642312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,422 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,427 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207baee67034f2d41b6bcda6c2059659c9a_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207baee67034f2d41b6bcda6c2059659c9a_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:42,428 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/eb3dfe4b6768495c8ae24f77b641da3e, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:42,429 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/eb3dfe4b6768495c8ae24f77b641da3e is 175, key is test_row_0/A:col10/1733595581993/Put/seqid=0 2024-12-07T18:19:42,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742025_1201 (size=57033) 2024-12-07T18:19:42,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595642474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,477 DEBUG [Thread-696 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:42,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595642612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595642613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595642614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:42,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595642616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T18:19:42,798 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-07T18:19:42,799 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:42,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-07T18:19:42,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-07T18:19:42,801 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:42,802 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:42,802 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:42,835 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=240, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/eb3dfe4b6768495c8ae24f77b641da3e 2024-12-07T18:19:42,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/f093cfc24bd6490bacb292f33da9f877 is 50, key is test_row_0/B:col10/1733595581993/Put/seqid=0 2024-12-07T18:19:42,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742026_1202 (size=12151) 2024-12-07T18:19:42,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/f093cfc24bd6490bacb292f33da9f877 2024-12-07T18:19:42,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/d284afe35ba34a49beba9ede21e47f2f is 50, key is test_row_0/C:col10/1733595581993/Put/seqid=0 2024-12-07T18:19:42,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-07T18:19:42,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742027_1203 (size=12151) 2024-12-07T18:19:42,903 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/d284afe35ba34a49beba9ede21e47f2f 2024-12-07T18:19:42,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/eb3dfe4b6768495c8ae24f77b641da3e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/eb3dfe4b6768495c8ae24f77b641da3e 2024-12-07T18:19:42,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/eb3dfe4b6768495c8ae24f77b641da3e, entries=300, sequenceid=240, filesize=55.7 K 2024-12-07T18:19:42,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/f093cfc24bd6490bacb292f33da9f877 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/f093cfc24bd6490bacb292f33da9f877 2024-12-07T18:19:42,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/f093cfc24bd6490bacb292f33da9f877, entries=150, sequenceid=240, filesize=11.9 K 2024-12-07T18:19:42,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/d284afe35ba34a49beba9ede21e47f2f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/d284afe35ba34a49beba9ede21e47f2f 2024-12-07T18:19:42,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/d284afe35ba34a49beba9ede21e47f2f, entries=150, sequenceid=240, filesize=11.9 K 2024-12-07T18:19:42,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 4ae54766e0f6f378fecb09a332e653a1 in 941ms, sequenceid=240, compaction requested=true 2024-12-07T18:19:42,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:42,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:42,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:42,936 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:42,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:42,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:42,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
4ae54766e0f6f378fecb09a332e653a1:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:42,936 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:42,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:42,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,938 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:42,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,938 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/B is initiating minor compaction (all files) 2024-12-07T18:19:42,938 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/B in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
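[editor's note] At this point the memstore-pressure flush that began at 18:19:41,994 has finished (~147.60 KB in 941 ms, sequenceid=240, "compaction requested=true"), CompactSplit has queued minor compactions for stores A, B and C of region 4ae54766e0f6f378fecb09a332e653a1, and the master has separately stored another client-requested FlushTableProcedure (pid=51). For orientation only, the same flush and compaction requests can be issued explicitly through the standard HBase Admin API. The sketch below is an assumed illustration using public client calls, not code from this test; only the table name is taken from the log.

// Illustrative sketch only -- not part of TestAcidGuarantees. It shows the Admin calls
// that correspond to the FlushTableProcedure and the compaction requests seen in the log;
// the table name matches the test, everything else (class name, setup) is assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Triggers a FlushTableProcedure on the master, like pid=49 and pid=51 above
      // (whether the call blocks until completion depends on the HBase version).
      admin.flush(table);
      // Asks the region servers to queue a compaction of the table's stores, similar
      // to the CompactSplit "Add compact mark" requests that follow the flush here.
      admin.compact(table);
    }
  }
}

In the log itself no Admin compaction call is needed: the flush pushed each of A, B and C to four store files, so SortedCompactionPolicy/ExploringCompactionPolicy selects all four eligible files per store and starts the minor compactions shown next.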
2024-12-07T18:19:42,938 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/056572ae79fb4b8a9f9993afd77d8e8c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0b96654623554e5085a2749c90f49893, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5d4fccb1cb9b41058029e06f701aa631, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/f093cfc24bd6490bacb292f33da9f877] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=47.8 K 2024-12-07T18:19:42,938 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 159368 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:42,939 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/A is initiating minor compaction (all files) 2024-12-07T18:19:42,939 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/A in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:42,939 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/0e6d756b419a45478e1088d52cc44c50, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/276eb85ea44949c2a519695f98858131, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4b1c15e62e3842f9a95b96c1dee95d26, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/eb3dfe4b6768495c8ae24f77b641da3e] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=155.6 K 2024-12-07T18:19:42,939 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:42,939 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/0e6d756b419a45478e1088d52cc44c50, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/276eb85ea44949c2a519695f98858131, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4b1c15e62e3842f9a95b96c1dee95d26, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/eb3dfe4b6768495c8ae24f77b641da3e] 2024-12-07T18:19:42,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,939 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 056572ae79fb4b8a9f9993afd77d8e8c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733595579441 2024-12-07T18:19:42,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,939 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e6d756b419a45478e1088d52cc44c50, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733595579441 2024-12-07T18:19:42,940 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b96654623554e5085a2749c90f49893, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1733595580108 2024-12-07T18:19:42,940 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 276eb85ea44949c2a519695f98858131, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1733595580108 2024-12-07T18:19:42,940 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d4fccb1cb9b41058029e06f701aa631, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733595580733 2024-12-07T18:19:42,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,940 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b1c15e62e3842f9a95b96c1dee95d26, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733595580733 2024-12-07T18:19:42,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,941 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting f093cfc24bd6490bacb292f33da9f877, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733595581380 2024-12-07T18:19:42,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,941 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb3dfe4b6768495c8ae24f77b641da3e, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733595581373 2024-12-07T18:19:42,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,954 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:42,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-07T18:19:42,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:42,955 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:19:42,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:42,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:42,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:42,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:42,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:42,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:42,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
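[Annotation] The "FLUSHING TO DISK" and "Swapping pipeline suffix" entries above show each of the three column families (A, B, C) being snapshotted before the flush writes them out. The toy sketch below illustrates that snapshot-then-flush pattern under the assumption that a mutable active segment is swapped for an immutable snapshot so new writes can continue during the flush; every name here is invented for illustration and is not the HBase API.

// Toy snapshot-then-flush memstore; names and behavior are simplified assumptions.
final class ToyMemStore {
    private StringBuilder active = new StringBuilder(); // mutable segment receiving new writes
    private String snapshot = "";                       // immutable segment waiting to be flushed

    synchronized void add(String cell) {
        active.append(cell).append('\n');
    }

    // Swap the active segment into an immutable snapshot so writes can continue during the flush.
    synchronized void snapshot() {
        snapshot = active.toString();
        active = new StringBuilder();
    }

    // Pretend to persist the snapshot as a new store file; return what was flushed.
    synchronized String flushSnapshot() {
        String flushed = snapshot;
        snapshot = "";
        return flushed;
    }
}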
2024-12-07T18:19:42,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,963 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:42,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,968 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#B#compaction#171 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:42,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,968 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/588359da84344ca288f716db24c9805d is 50, key is test_row_0/B:col10/1733595581993/Put/seqid=0 2024-12-07T18:19:42,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,975 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,976 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120719e0f548fcaa4baa8597d85029be8d08_4ae54766e0f6f378fecb09a332e653a1 store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:42,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412074e663d145737428bab8e40402bea41f2_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_1/A:col10/1733595582001/Put/seqid=0 2024-12-07T18:19:42,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,981 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120719e0f548fcaa4baa8597d85029be8d08_4ae54766e0f6f378fecb09a332e653a1, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:42,981 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120719e0f548fcaa4baa8597d85029be8d08_4ae54766e0f6f378fecb09a332e653a1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:42,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
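[Annotation] The PressureAwareThroughputController entry above ("average throughput is 0.82 MB/second, slept 0 time(s) ... total limit is 50.00 MB/second") reflects throughput-limited compaction writes. The following is a minimal sketch, assuming a simple running-average throttle, of how such a controller can decide when a compaction thread must pause; it is illustrative only and does not reproduce HBase's controller.

// Assumed running-average write throttle (not HBase code).
final class ToyThroughputController {
    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten;
    private int sleeps; // "slept N time(s)" in the log counts pauses like these

    ToyThroughputController(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    // Record a write; if the running average exceeds the limit, sleep until it no longer does.
    void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double secondsNeeded = bytesWritten / maxBytesPerSecond;
        if (secondsNeeded > elapsedSeconds) {
            sleeps++;
            Thread.sleep((long) ((secondsNeeded - elapsedSeconds) * 1000));
        }
    }
}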
2024-12-07T18:19:42,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
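[Annotation] The DefaultMobStoreCompactor entries above ("Commit or abort size=0 mobCells=0" followed by "Aborting writer ... because there are no MOB cells") show the compactor discarding an empty temporary MOB file instead of committing it. The snippet below is a hypothetical reduction of that commit-or-abort decision; the method name and return value are made up for illustration.

// Hypothetical commit-or-abort decision for a temporary MOB file (illustrative only).
final class MobCommitOrAbortSketch {
    static String commitOrAbort(long mobCellsWritten, String tmpMobFileName) {
        if (mobCellsWritten > 0) {
            return "commit " + tmpMobFileName; // keep the MOB file and reference it from the store file
        }
        return "abort " + tmpMobFileName;      // no MOB cells were written, so drop the empty temp file
    }

    public static void main(String[] args) {
        System.out.println(commitOrAbort(0, "tmp-mob-file")); // mirrors the "Aborting writer" entry above
    }
}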
2024-12-07T18:19:42,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
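[Annotation] The repeated "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" entries, logged from the RPC handler threads, come from a factory resolving which tracker implementation a store should use. Below is a hedged sketch of that factory-with-default pattern; the interface, class, and parameter names are assumptions and not HBase's actual classes.

// Assumed factory-with-default pattern (illustrative only, not the HBase StoreFileTrackerFactory).
final class TrackerFactorySketch {
    interface StoreFileTracker { }
    static final class DefaultTrackerSketch implements StoreFileTracker { }

    // Resolve a tracker implementation; with no explicit configuration the default is used,
    // which is why every handler thread above logs the same default implementation class.
    static StoreFileTracker create(String configuredImplClassName) {
        if (configuredImplClassName == null || configuredImplClassName.isBlank()) {
            return new DefaultTrackerSketch();
        }
        // A real factory would load configuredImplClassName reflectively; this sketch keeps the default.
        return new DefaultTrackerSketch();
    }
}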
2024-12-07T18:19:42,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:19:42,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:42,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742028_1204 (size=12663) 2024-12-07T18:19:43,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742029_1205 (size=9814) 2024-12-07T18:19:43,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742030_1206 (size=4469) 2024-12-07T18:19:43,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,029 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#A#compaction#172 average throughput is 0.37 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:43,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,030 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/cf4fc47b494941a0817faded6801498b is 175, key is test_row_0/A:col10/1733595581993/Put/seqid=0 2024-12-07T18:19:43,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,033 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,038 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,042 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[DEBUG entries of this form, from RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 45237, repeat continuously from 2024-12-07T18:19:43,043 through 2024-12-07T18:19:43,138; the only other events logged in that interval are:]
2024-12-07T18:19:43,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742031_1207 (size=31617)
2024-12-07T18:19:43,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-12-07T18:19:43,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:43,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:43,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,149 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595643197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595643199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595643197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595643203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595643306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595643309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595643316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595643316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-07T18:19:43,407 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/588359da84344ca288f716db24c9805d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/588359da84344ca288f716db24c9805d 2024-12-07T18:19:43,413 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/B of 4ae54766e0f6f378fecb09a332e653a1 into 588359da84344ca288f716db24c9805d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:43,413 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:43,413 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/B, priority=12, startTime=1733595582936; duration=0sec 2024-12-07T18:19:43,413 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:43,413 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:B 2024-12-07T18:19:43,414 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:43,415 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:43,415 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/C is initiating minor compaction (all files) 2024-12-07T18:19:43,416 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/C in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:43,416 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/e733b83fae77492db0f72773e1139a56, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7c523bec2cd2429ba581149ed3c6e7b7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/441e8422c4654ae9be5a161f8fa6a64d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/d284afe35ba34a49beba9ede21e47f2f] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=47.8 K 2024-12-07T18:19:43,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,417 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting e733b83fae77492db0f72773e1139a56, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733595579441 2024-12-07T18:19:43,418 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c523bec2cd2429ba581149ed3c6e7b7, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1733595580108 2024-12-07T18:19:43,418 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 441e8422c4654ae9be5a161f8fa6a64d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733595580733 2024-12-07T18:19:43,419 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d284afe35ba34a49beba9ede21e47f2f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733595581380 2024-12-07T18:19:43,422 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412074e663d145737428bab8e40402bea41f2_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074e663d145737428bab8e40402bea41f2_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:43,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/32484b564aca4ed5a5d7855e72f405a7, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:43,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/32484b564aca4ed5a5d7855e72f405a7 is 175, key is test_row_1/A:col10/1733595582001/Put/seqid=0 2024-12-07T18:19:43,447 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#C#compaction#174 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:43,448 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/2a2eb038a5e14cf4825f30b8295e46f7 is 50, key is test_row_0/C:col10/1733595581993/Put/seqid=0 2024-12-07T18:19:43,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742032_1208 (size=22461) 2024-12-07T18:19:43,452 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/32484b564aca4ed5a5d7855e72f405a7 2024-12-07T18:19:43,466 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/cf4fc47b494941a0817faded6801498b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/cf4fc47b494941a0817faded6801498b 2024-12-07T18:19:43,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/e831ac94a4d84906a52e96fdd36a3f81 is 50, key is test_row_1/B:col10/1733595582001/Put/seqid=0 2024-12-07T18:19:43,473 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/A of 4ae54766e0f6f378fecb09a332e653a1 into cf4fc47b494941a0817faded6801498b(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:43,473 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:43,473 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/A, priority=12, startTime=1733595582936; duration=0sec 2024-12-07T18:19:43,474 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:43,474 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:A 2024-12-07T18:19:43,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742033_1209 (size=12663) 2024-12-07T18:19:43,492 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/2a2eb038a5e14cf4825f30b8295e46f7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2a2eb038a5e14cf4825f30b8295e46f7 2024-12-07T18:19:43,501 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/C of 4ae54766e0f6f378fecb09a332e653a1 into 2a2eb038a5e14cf4825f30b8295e46f7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:43,501 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:43,501 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/C, priority=12, startTime=1733595582936; duration=0sec 2024-12-07T18:19:43,503 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:43,503 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:C 2024-12-07T18:19:43,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742034_1210 (size=9757) 2024-12-07T18:19:43,507 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/e831ac94a4d84906a52e96fdd36a3f81 2024-12-07T18:19:43,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595643511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595643512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595643519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/1c6ecd89f1d042a08488c99ee995dc3b is 50, key is test_row_1/C:col10/1733595582001/Put/seqid=0 2024-12-07T18:19:43,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595643520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742035_1211 (size=9757) 2024-12-07T18:19:43,558 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/1c6ecd89f1d042a08488c99ee995dc3b 2024-12-07T18:19:43,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/32484b564aca4ed5a5d7855e72f405a7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/32484b564aca4ed5a5d7855e72f405a7 2024-12-07T18:19:43,574 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/32484b564aca4ed5a5d7855e72f405a7, entries=100, sequenceid=251, filesize=21.9 K 2024-12-07T18:19:43,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/e831ac94a4d84906a52e96fdd36a3f81 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/e831ac94a4d84906a52e96fdd36a3f81 2024-12-07T18:19:43,584 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/e831ac94a4d84906a52e96fdd36a3f81, entries=100, sequenceid=251, filesize=9.5 K 2024-12-07T18:19:43,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/1c6ecd89f1d042a08488c99ee995dc3b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/1c6ecd89f1d042a08488c99ee995dc3b 2024-12-07T18:19:43,591 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/1c6ecd89f1d042a08488c99ee995dc3b, entries=100, sequenceid=251, filesize=9.5 K 2024-12-07T18:19:43,592 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 4ae54766e0f6f378fecb09a332e653a1 in 638ms, sequenceid=251, compaction requested=false 2024-12-07T18:19:43,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:43,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:43,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-07T18:19:43,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-07T18:19:43,596 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-07T18:19:43,596 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 792 msec 2024-12-07T18:19:43,598 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 798 msec 2024-12-07T18:19:43,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:43,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-07T18:19:43,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:43,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:43,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:43,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:43,818 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:43,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:43,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207fcd3fe02d52747a2b41c91d367e4263f_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595583816/Put/seqid=0 2024-12-07T18:19:43,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742036_1212 (size=14994) 2024-12-07T18:19:43,863 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:43,868 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207fcd3fe02d52747a2b41c91d367e4263f_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207fcd3fe02d52747a2b41c91d367e4263f_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:43,870 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/f2a83c7b40da423197e05607c43490ee, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:43,871 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/f2a83c7b40da423197e05607c43490ee is 175, key is test_row_0/A:col10/1733595583816/Put/seqid=0 2024-12-07T18:19:43,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595643828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595643885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595643885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595643885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-07T18:19:43,906 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-07T18:19:43,907 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:43,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742037_1213 (size=39949) 2024-12-07T18:19:43,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-07T18:19:43,909 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=280, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/f2a83c7b40da423197e05607c43490ee 2024-12-07T18:19:43,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-07T18:19:43,910 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:43,911 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:43,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:43,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/0004910ed37740fe93265c3737bc22b1 is 50, key is test_row_0/B:col10/1733595583816/Put/seqid=0 2024-12-07T18:19:43,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742038_1214 (size=12301) 2024-12-07T18:19:43,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595643981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595643986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595643990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:43,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:43,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595643992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-07T18:19:44,063 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:44,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:44,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595644185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595644190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595644193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595644194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-07T18:19:44,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:44,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:44,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/0004910ed37740fe93265c3737bc22b1 2024-12-07T18:19:44,369 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:44,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:44,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:44,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/acff15df6d874ce0b2ea3cba4f9aad47 is 50, key is test_row_0/C:col10/1733595583816/Put/seqid=0 2024-12-07T18:19:44,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742039_1215 (size=12301) 2024-12-07T18:19:44,384 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/acff15df6d874ce0b2ea3cba4f9aad47 2024-12-07T18:19:44,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/f2a83c7b40da423197e05607c43490ee as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f2a83c7b40da423197e05607c43490ee 2024-12-07T18:19:44,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f2a83c7b40da423197e05607c43490ee, entries=200, sequenceid=280, filesize=39.0 K 2024-12-07T18:19:44,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/0004910ed37740fe93265c3737bc22b1 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0004910ed37740fe93265c3737bc22b1 2024-12-07T18:19:44,403 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0004910ed37740fe93265c3737bc22b1, entries=150, sequenceid=280, filesize=12.0 K 2024-12-07T18:19:44,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/acff15df6d874ce0b2ea3cba4f9aad47 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/acff15df6d874ce0b2ea3cba4f9aad47 2024-12-07T18:19:44,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/acff15df6d874ce0b2ea3cba4f9aad47, entries=150, sequenceid=280, filesize=12.0 K 
2024-12-07T18:19:44,410 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 4ae54766e0f6f378fecb09a332e653a1 in 593ms, sequenceid=280, compaction requested=true 2024-12-07T18:19:44,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:44,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:44,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:44,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:44,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-07T18:19:44,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:44,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-07T18:19:44,411 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:44,411 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:44,412 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:44,412 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/A is initiating minor compaction (all files) 2024-12-07T18:19:44,412 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/A in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:44,412 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/cf4fc47b494941a0817faded6801498b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/32484b564aca4ed5a5d7855e72f405a7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f2a83c7b40da423197e05607c43490ee] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=91.8 K 2024-12-07T18:19:44,412 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,412 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/cf4fc47b494941a0817faded6801498b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/32484b564aca4ed5a5d7855e72f405a7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f2a83c7b40da423197e05607c43490ee] 2024-12-07T18:19:44,413 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:44,413 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/C is initiating minor compaction (all files) 2024-12-07T18:19:44,413 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/C in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:44,413 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2a2eb038a5e14cf4825f30b8295e46f7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/1c6ecd89f1d042a08488c99ee995dc3b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/acff15df6d874ce0b2ea3cba4f9aad47] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=33.9 K 2024-12-07T18:19:44,413 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf4fc47b494941a0817faded6801498b, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733595581380 2024-12-07T18:19:44,413 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a2eb038a5e14cf4825f30b8295e46f7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733595581380 2024-12-07T18:19:44,414 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32484b564aca4ed5a5d7855e72f405a7, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733595582001 2024-12-07T18:19:44,414 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c6ecd89f1d042a08488c99ee995dc3b, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733595582001 2024-12-07T18:19:44,414 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2a83c7b40da423197e05607c43490ee, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733595583157 2024-12-07T18:19:44,414 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting acff15df6d874ce0b2ea3cba4f9aad47, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733595583157 2024-12-07T18:19:44,426 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#C#compaction#180 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:44,427 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/d784bf1330f54d90bfed86efdd3c2361 is 50, key is test_row_0/C:col10/1733595583816/Put/seqid=0 2024-12-07T18:19:44,437 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:44,439 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412070fb3c3dc06ea4b739f055c1a6919b141_4ae54766e0f6f378fecb09a332e653a1 store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:44,442 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412070fb3c3dc06ea4b739f055c1a6919b141_4ae54766e0f6f378fecb09a332e653a1, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:44,442 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412070fb3c3dc06ea4b739f055c1a6919b141_4ae54766e0f6f378fecb09a332e653a1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:44,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742040_1216 (size=12915) 2024-12-07T18:19:44,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742041_1217 (size=4469) 2024-12-07T18:19:44,476 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#A#compaction#181 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:44,477 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/c37179a16b044845aaf7f9e2502361ac is 175, key is test_row_0/A:col10/1733595583816/Put/seqid=0 2024-12-07T18:19:44,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:44,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-07T18:19:44,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:44,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:44,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:44,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:44,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:44,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:44,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742042_1218 (size=31869) 2024-12-07T18:19:44,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207ea8aa63949eb44c1b2052d9300653acb_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595583830/Put/seqid=0 2024-12-07T18:19:44,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-07T18:19:44,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742043_1219 (size=12454) 2024-12-07T18:19:44,522 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:44,525 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:44,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:44,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:44,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,525 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:44,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:44,534 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207ea8aa63949eb44c1b2052d9300653acb_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207ea8aa63949eb44c1b2052d9300653acb_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:44,535 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/5ab8f92fbb284fb2acb8e039767b2ff2, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:44,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/5ab8f92fbb284fb2acb8e039767b2ff2 is 175, key is test_row_0/A:col10/1733595583830/Put/seqid=0 2024-12-07T18:19:44,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595644532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595644534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595644537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595644538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742044_1220 (size=31255) 2024-12-07T18:19:44,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595644640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595644641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595644643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595644646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,677 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:44,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:44,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:44,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,831 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:44,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:44,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595644847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595644848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595644848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:44,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595644853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,861 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/d784bf1330f54d90bfed86efdd3c2361 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/d784bf1330f54d90bfed86efdd3c2361 2024-12-07T18:19:44,867 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/C of 4ae54766e0f6f378fecb09a332e653a1 into d784bf1330f54d90bfed86efdd3c2361(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:44,867 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:44,867 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/C, priority=13, startTime=1733595584411; duration=0sec 2024-12-07T18:19:44,867 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:44,867 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:C 2024-12-07T18:19:44,867 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:44,868 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:44,868 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/B is initiating minor compaction (all files) 2024-12-07T18:19:44,868 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/B in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,869 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/588359da84344ca288f716db24c9805d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/e831ac94a4d84906a52e96fdd36a3f81, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0004910ed37740fe93265c3737bc22b1] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=33.9 K 2024-12-07T18:19:44,869 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 588359da84344ca288f716db24c9805d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733595581380 2024-12-07T18:19:44,870 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting e831ac94a4d84906a52e96fdd36a3f81, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733595582001 2024-12-07T18:19:44,870 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 0004910ed37740fe93265c3737bc22b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733595583157 2024-12-07T18:19:44,889 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
4ae54766e0f6f378fecb09a332e653a1#B#compaction#183 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:44,890 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/ab25eccf858e43d4884e55c2ad88b462 is 50, key is test_row_0/B:col10/1733595583816/Put/seqid=0 2024-12-07T18:19:44,915 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/c37179a16b044845aaf7f9e2502361ac as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c37179a16b044845aaf7f9e2502361ac 2024-12-07T18:19:44,922 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/A of 4ae54766e0f6f378fecb09a332e653a1 into c37179a16b044845aaf7f9e2502361ac(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:44,922 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:44,922 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/A, priority=13, startTime=1733595584410; duration=0sec 2024-12-07T18:19:44,922 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:44,922 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:A 2024-12-07T18:19:44,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742045_1221 (size=12915) 2024-12-07T18:19:44,962 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/5ab8f92fbb284fb2acb8e039767b2ff2 2024-12-07T18:19:44,985 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:44,986 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:44,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:44,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:44,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:44,986 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:44,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:44,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:44,988 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/cdd47eba13964154ac8e092bbfa2747d is 50, key is test_row_0/B:col10/1733595583830/Put/seqid=0 2024-12-07T18:19:45,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-07T18:19:45,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742046_1222 (size=12301) 2024-12-07T18:19:45,139 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:45,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:45,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:45,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:45,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:45,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595645156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:45,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595645160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:45,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595645160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:45,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595645160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,292 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:45,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:45,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:45,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:45,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,346 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/ab25eccf858e43d4884e55c2ad88b462 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ab25eccf858e43d4884e55c2ad88b462 2024-12-07T18:19:45,353 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/B of 4ae54766e0f6f378fecb09a332e653a1 into ab25eccf858e43d4884e55c2ad88b462(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:45,353 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:45,353 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/B, priority=13, startTime=1733595584411; duration=0sec 2024-12-07T18:19:45,353 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:45,353 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:B 2024-12-07T18:19:45,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/cdd47eba13964154ac8e092bbfa2747d 2024-12-07T18:19:45,446 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:45,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:45,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:45,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:45,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:45,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:45,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/09ae03d76e9a4607aeb7d60f66f13bf6 is 50, key is test_row_0/C:col10/1733595583830/Put/seqid=0 2024-12-07T18:19:45,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742047_1223 (size=12301) 2024-12-07T18:19:45,601 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:45,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:45,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:45,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:45,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:45,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595645663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:45,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595645666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:45,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595645666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:45,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595645667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,755 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:45,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:45,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:45,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:45,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/09ae03d76e9a4607aeb7d60f66f13bf6 2024-12-07T18:19:45,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/5ab8f92fbb284fb2acb8e039767b2ff2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/5ab8f92fbb284fb2acb8e039767b2ff2 2024-12-07T18:19:45,910 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:45,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:45,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:45,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:45,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:45,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:45,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/5ab8f92fbb284fb2acb8e039767b2ff2, entries=150, sequenceid=293, filesize=30.5 K 2024-12-07T18:19:45,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/cdd47eba13964154ac8e092bbfa2747d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/cdd47eba13964154ac8e092bbfa2747d 2024-12-07T18:19:45,922 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/cdd47eba13964154ac8e092bbfa2747d, entries=150, sequenceid=293, filesize=12.0 K 2024-12-07T18:19:45,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/09ae03d76e9a4607aeb7d60f66f13bf6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/09ae03d76e9a4607aeb7d60f66f13bf6 2024-12-07T18:19:45,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/09ae03d76e9a4607aeb7d60f66f13bf6, entries=150, sequenceid=293, filesize=12.0 K 2024-12-07T18:19:45,931 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 4ae54766e0f6f378fecb09a332e653a1 in 1438ms, sequenceid=293, compaction requested=false 2024-12-07T18:19:45,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:46,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-07T18:19:46,062 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:46,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-07T18:19:46,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:46,063 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:19:46,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:46,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:46,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:46,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:46,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:46,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:46,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412074364785948734f3dba89c00e478e921f_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595584528/Put/seqid=0 2024-12-07T18:19:46,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742048_1224 (size=12454) 2024-12-07T18:19:46,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:46,117 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412074364785948734f3dba89c00e478e921f_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074364785948734f3dba89c00e478e921f_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:46,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush 
store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/faefc5e6132545faab80ecbadd638aa8, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:46,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/faefc5e6132545faab80ecbadd638aa8 is 175, key is test_row_0/A:col10/1733595584528/Put/seqid=0 2024-12-07T18:19:46,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742049_1225 (size=31255) 2024-12-07T18:19:46,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:46,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:46,571 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=319, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/faefc5e6132545faab80ecbadd638aa8 2024-12-07T18:19:46,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/8e85fb42d6e9420caf53a455b53ef54b is 50, key is test_row_0/B:col10/1733595584528/Put/seqid=0 2024-12-07T18:19:46,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:46,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595646592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:46,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742050_1226 (size=12301) 2024-12-07T18:19:46,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:46,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595646669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:46,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:46,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595646671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:46,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:46,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595646673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:46,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595646675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:46,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:46,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595646695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:46,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:46,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595646899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:47,007 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/8e85fb42d6e9420caf53a455b53ef54b 2024-12-07T18:19:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/7df6fd6915274cdfbc88a7d5aa35ca34 is 50, key is test_row_0/C:col10/1733595584528/Put/seqid=0 2024-12-07T18:19:47,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742051_1227 (size=12301) 2024-12-07T18:19:47,050 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/7df6fd6915274cdfbc88a7d5aa35ca34 2024-12-07T18:19:47,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/faefc5e6132545faab80ecbadd638aa8 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/faefc5e6132545faab80ecbadd638aa8 2024-12-07T18:19:47,064 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/faefc5e6132545faab80ecbadd638aa8, entries=150, sequenceid=319, filesize=30.5 K 2024-12-07T18:19:47,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/8e85fb42d6e9420caf53a455b53ef54b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8e85fb42d6e9420caf53a455b53ef54b 2024-12-07T18:19:47,071 INFO [master/8a7a030b35db:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-07T18:19:47,071 INFO [master/8a7a030b35db:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-07T18:19:47,073 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8e85fb42d6e9420caf53a455b53ef54b, entries=150, sequenceid=319, filesize=12.0 K 2024-12-07T18:19:47,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/7df6fd6915274cdfbc88a7d5aa35ca34 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7df6fd6915274cdfbc88a7d5aa35ca34 2024-12-07T18:19:47,093 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7df6fd6915274cdfbc88a7d5aa35ca34, entries=150, sequenceid=319, filesize=12.0 K 2024-12-07T18:19:47,095 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 4ae54766e0f6f378fecb09a332e653a1 in 1032ms, sequenceid=319, compaction requested=true 2024-12-07T18:19:47,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:47,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
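
The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server rejecting writes while the region's memstore sits above its blocking threshold; writers are expected to back off and retry until the in-flight flush drains it. A minimal Java sketch of such a bounded retry loop follows; the table, family, and row names are taken from this test, but the retry count, pause, and value payload are illustrative assumptions, and depending on client retry settings the exception may reach the caller wrapped in a RetriesExhausted* exception rather than directly.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BoundedRetryWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative client retry settings; release defaults differ.
        conf.setInt("hbase.client.retries.number", 5);
        conf.setLong("hbase.client.pause", 200L);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          IOException last = null;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              last = null;
              break;
            } catch (RegionTooBusyException e) {
              // Server is still over its memstore blocking limit; back off and retry.
              last = e;
              Thread.sleep(200L * attempt);
            }
          }
          if (last != null) {
            throw last;
          }
        }
      }
    }
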
2024-12-07T18:19:47,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-07T18:19:47,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-07T18:19:47,100 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-07T18:19:47,100 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1880 sec 2024-12-07T18:19:47,102 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 3.1940 sec 2024-12-07T18:19:47,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:47,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-07T18:19:47,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:47,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:47,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:47,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:47,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:47,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:47,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412071eb0775ab762403cb481eb3d660327a2_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595586569/Put/seqid=0 2024-12-07T18:19:47,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742052_1228 (size=14994) 2024-12-07T18:19:47,290 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:47,297 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412071eb0775ab762403cb481eb3d660327a2_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412071eb0775ab762403cb481eb3d660327a2_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:47,298 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4f4b6dbd74f045c7a0645d7a2862ae18, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:47,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4f4b6dbd74f045c7a0645d7a2862ae18 is 175, key is test_row_0/A:col10/1733595586569/Put/seqid=0 2024-12-07T18:19:47,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:47,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595647301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:47,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742053_1229 (size=39949) 2024-12-07T18:19:47,318 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=333, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4f4b6dbd74f045c7a0645d7a2862ae18 2024-12-07T18:19:47,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/65543bd0c0f04365b7a9551e9f1f1dfa is 50, key is test_row_0/B:col10/1733595586569/Put/seqid=0 2024-12-07T18:19:47,365 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742054_1230 (size=12301) 2024-12-07T18:19:47,370 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/65543bd0c0f04365b7a9551e9f1f1dfa 2024-12-07T18:19:47,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/070796035a3b4c3e94e6cde865577917 is 50, key is test_row_0/C:col10/1733595586569/Put/seqid=0 2024-12-07T18:19:47,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:47,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595647407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:47,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742055_1231 (size=12301) 2024-12-07T18:19:47,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:47,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595647612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:47,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/070796035a3b4c3e94e6cde865577917 2024-12-07T18:19:47,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4f4b6dbd74f045c7a0645d7a2862ae18 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4f4b6dbd74f045c7a0645d7a2862ae18 2024-12-07T18:19:47,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4f4b6dbd74f045c7a0645d7a2862ae18, entries=200, sequenceid=333, filesize=39.0 K 2024-12-07T18:19:47,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/65543bd0c0f04365b7a9551e9f1f1dfa as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/65543bd0c0f04365b7a9551e9f1f1dfa 2024-12-07T18:19:47,852 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/65543bd0c0f04365b7a9551e9f1f1dfa, entries=150, sequenceid=333, filesize=12.0 K 2024-12-07T18:19:47,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/070796035a3b4c3e94e6cde865577917 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/070796035a3b4c3e94e6cde865577917 2024-12-07T18:19:47,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/070796035a3b4c3e94e6cde865577917, entries=150, sequenceid=333, filesize=12.0 K 2024-12-07T18:19:47,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 4ae54766e0f6f378fecb09a332e653a1 in 646ms, sequenceid=333, compaction requested=true 2024-12-07T18:19:47,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:47,862 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:47,863 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:47,863 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/A is initiating minor compaction (all files) 2024-12-07T18:19:47,863 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/A in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:47,863 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c37179a16b044845aaf7f9e2502361ac, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/5ab8f92fbb284fb2acb8e039767b2ff2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/faefc5e6132545faab80ecbadd638aa8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4f4b6dbd74f045c7a0645d7a2862ae18] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=131.2 K 2024-12-07T18:19:47,863 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
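
The mobdir paths and the DefaultMobStoreFlusher/DefaultMobStoreCompactor entries above indicate that column family A is MOB-enabled in this test, so large cells are written to separate MOB files and only references stay in the regular store files. A sketch of how such a family is typically declared follows; the MOB threshold and the plain B and C families are illustrative assumptions, since the test's actual table descriptors are not shown in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          // Cells in family A larger than the threshold go through the MOB write path
          // (DefaultMobStoreFlusher), as seen in the flush entries above.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100L) // bytes; illustrative value
              .build());
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
          admin.createTable(table.build());
        }
      }
    }
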
2024-12-07T18:19:47,863 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c37179a16b044845aaf7f9e2502361ac, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/5ab8f92fbb284fb2acb8e039767b2ff2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/faefc5e6132545faab80ecbadd638aa8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4f4b6dbd74f045c7a0645d7a2862ae18] 2024-12-07T18:19:47,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:47,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:47,864 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:47,864 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting c37179a16b044845aaf7f9e2502361ac, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733595583157 2024-12-07T18:19:47,865 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ab8f92fbb284fb2acb8e039767b2ff2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733595583827 2024-12-07T18:19:47,865 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting faefc5e6132545faab80ecbadd638aa8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1733595584528 2024-12-07T18:19:47,866 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:47,866 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/B is initiating minor compaction (all files) 2024-12-07T18:19:47,866 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/B in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:47,866 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ab25eccf858e43d4884e55c2ad88b462, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/cdd47eba13964154ac8e092bbfa2747d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8e85fb42d6e9420caf53a455b53ef54b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/65543bd0c0f04365b7a9551e9f1f1dfa] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=48.7 K 2024-12-07T18:19:47,866 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f4b6dbd74f045c7a0645d7a2862ae18, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733595586562 2024-12-07T18:19:47,867 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ab25eccf858e43d4884e55c2ad88b462, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733595583157 2024-12-07T18:19:47,868 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting cdd47eba13964154ac8e092bbfa2747d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733595583827 2024-12-07T18:19:47,868 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e85fb42d6e9420caf53a455b53ef54b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1733595584528 2024-12-07T18:19:47,869 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 65543bd0c0f04365b7a9551e9f1f1dfa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733595586562 2024-12-07T18:19:47,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:47,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:47,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:47,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:47,893 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 
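
After each flush above, "compaction requested=true" leads the CompactSplit thread to run the configured selection policy (ExploringCompactionPolicy here) over the eligible store files. The same work can also be requested explicitly through the Admin API; the sketch below is a minimal example with an arbitrary polling interval, and file selection plus throttling are still left to the server-side policy and throughput controller shown in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Ask for a compaction of family A; file selection still goes through
          // the configured compaction policy on the region server.
          admin.compact(table, Bytes.toBytes("A"));
          // Poll until the server reports no compaction in progress for the table.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(1000L);
          }
        }
      }
    }
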
2024-12-07T18:19:47,907 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241207dea527d4c1cc429b829a6eb26e78ff88_4ae54766e0f6f378fecb09a332e653a1 store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:47,908 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#B#compaction#193 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:47,908 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/0298dbf32e5844f58c1556a49d5f5480 is 50, key is test_row_0/B:col10/1733595586569/Put/seqid=0 2024-12-07T18:19:47,910 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241207dea527d4c1cc429b829a6eb26e78ff88_4ae54766e0f6f378fecb09a332e653a1, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:47,910 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207dea527d4c1cc429b829a6eb26e78ff88_4ae54766e0f6f378fecb09a332e653a1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:47,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:47,920 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:19:47,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:47,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:47,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:47,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:47,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:47,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:47,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742056_1232 (size=13051) 2024-12-07T18:19:47,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207750860b9a9f549f9b5eebc87c3ee1dd3_4ae54766e0f6f378fecb09a332e653a1 is 50, 
key is test_row_0/A:col10/1733595587918/Put/seqid=0 2024-12-07T18:19:47,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742057_1233 (size=4469) 2024-12-07T18:19:47,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:47,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595647978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:47,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742058_1234 (size=14994) 2024-12-07T18:19:47,992 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:47,996 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207750860b9a9f549f9b5eebc87c3ee1dd3_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207750860b9a9f549f9b5eebc87c3ee1dd3_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:47,998 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/14db3b8dbe71435ab9f39c096bbdde27, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:47,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len 
of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/14db3b8dbe71435ab9f39c096bbdde27 is 175, key is test_row_0/A:col10/1733595587918/Put/seqid=0 2024-12-07T18:19:48,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-07T18:19:48,021 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-07T18:19:48,022 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:48,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-07T18:19:48,024 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:48,025 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:48,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:48,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T18:19:48,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742059_1235 (size=39949) 2024-12-07T18:19:48,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:48,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595648084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T18:19:48,177 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:48,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:48,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:48,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:48,178 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:48,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:48,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595648288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T18:19:48,331 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:48,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:48,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:48,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:48,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,360 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/0298dbf32e5844f58c1556a49d5f5480 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0298dbf32e5844f58c1556a49d5f5480 2024-12-07T18:19:48,366 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/B of 4ae54766e0f6f378fecb09a332e653a1 into 0298dbf32e5844f58c1556a49d5f5480(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:48,366 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:48,366 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/B, priority=12, startTime=1733595587864; duration=0sec 2024-12-07T18:19:48,366 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:48,366 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:B 2024-12-07T18:19:48,366 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:48,374 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:48,374 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/C is initiating minor compaction (all files) 2024-12-07T18:19:48,374 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/C in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:48,374 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/d784bf1330f54d90bfed86efdd3c2361, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/09ae03d76e9a4607aeb7d60f66f13bf6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7df6fd6915274cdfbc88a7d5aa35ca34, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/070796035a3b4c3e94e6cde865577917] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=48.7 K 2024-12-07T18:19:48,375 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d784bf1330f54d90bfed86efdd3c2361, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733595583157 2024-12-07T18:19:48,376 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 09ae03d76e9a4607aeb7d60f66f13bf6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733595583827 2024-12-07T18:19:48,376 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7df6fd6915274cdfbc88a7d5aa35ca34, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=319, earliestPutTs=1733595584528 2024-12-07T18:19:48,380 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 070796035a3b4c3e94e6cde865577917, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733595586562 2024-12-07T18:19:48,381 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#A#compaction#192 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:48,382 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/749c29c126be48a4a818c83c2a8323e4 is 175, key is test_row_0/A:col10/1733595586569/Put/seqid=0 2024-12-07T18:19:48,406 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#C#compaction#195 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:48,407 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/dc731078d6b24a9c9966268fea54debf is 50, key is test_row_0/C:col10/1733595586569/Put/seqid=0 2024-12-07T18:19:48,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742060_1236 (size=32005) 2024-12-07T18:19:48,433 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/749c29c126be48a4a818c83c2a8323e4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/749c29c126be48a4a818c83c2a8323e4 2024-12-07T18:19:48,447 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=356, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/14db3b8dbe71435ab9f39c096bbdde27 2024-12-07T18:19:48,448 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/A of 4ae54766e0f6f378fecb09a332e653a1 into 749c29c126be48a4a818c83c2a8323e4(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:48,448 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:48,449 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/A, priority=12, startTime=1733595587861; duration=0sec 2024-12-07T18:19:48,449 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:48,449 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:A 2024-12-07T18:19:48,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742061_1237 (size=13051) 2024-12-07T18:19:48,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/3f0397ef63a9418782ddfbf99166b935 is 50, key is test_row_0/B:col10/1733595587918/Put/seqid=0 2024-12-07T18:19:48,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742062_1238 (size=12301) 2024-12-07T18:19:48,482 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/3f0397ef63a9418782ddfbf99166b935 2024-12-07T18:19:48,485 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:48,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:48,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:48,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:48,486 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/5ddabdeb9ed6461aab4341ffa5d2f3ed is 50, key is test_row_0/C:col10/1733595587918/Put/seqid=0 2024-12-07T18:19:48,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742063_1239 (size=12301) 2024-12-07T18:19:48,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:48,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595648592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T18:19:48,639 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:48,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:48,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:48,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:48,640 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:48,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:48,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595648690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,691 DEBUG [Thread-694 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:48,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:48,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595648692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,694 DEBUG [Thread-698 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:48,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:48,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595648693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,694 DEBUG [Thread-702 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:48,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:48,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595648696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,699 DEBUG [Thread-700 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4167 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:48,792 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:48,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:48,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:48,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:48,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:48,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,857 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/dc731078d6b24a9c9966268fea54debf as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/dc731078d6b24a9c9966268fea54debf 2024-12-07T18:19:48,864 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/C of 4ae54766e0f6f378fecb09a332e653a1 into dc731078d6b24a9c9966268fea54debf(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:48,864 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:48,864 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/C, priority=12, startTime=1733595587876; duration=0sec 2024-12-07T18:19:48,864 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:48,864 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:C 2024-12-07T18:19:48,932 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/5ddabdeb9ed6461aab4341ffa5d2f3ed 2024-12-07T18:19:48,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/14db3b8dbe71435ab9f39c096bbdde27 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/14db3b8dbe71435ab9f39c096bbdde27 2024-12-07T18:19:48,943 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/14db3b8dbe71435ab9f39c096bbdde27, entries=200, sequenceid=356, filesize=39.0 K 2024-12-07T18:19:48,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/3f0397ef63a9418782ddfbf99166b935 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3f0397ef63a9418782ddfbf99166b935 2024-12-07T18:19:48,949 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3f0397ef63a9418782ddfbf99166b935, entries=150, sequenceid=356, filesize=12.0 K 2024-12-07T18:19:48,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/5ddabdeb9ed6461aab4341ffa5d2f3ed as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5ddabdeb9ed6461aab4341ffa5d2f3ed 2024-12-07T18:19:48,953 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:48,953 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:48,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:48,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:48,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:48,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:48,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:48,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:48,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5ddabdeb9ed6461aab4341ffa5d2f3ed, entries=150, sequenceid=356, filesize=12.0 K 2024-12-07T18:19:48,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 4ae54766e0f6f378fecb09a332e653a1 in 1039ms, sequenceid=356, compaction requested=false 2024-12-07T18:19:48,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:49,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:49,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-07T18:19:49,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:49,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:49,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:49,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:49,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:49,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:49,106 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:49,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:49,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:49,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:49,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:49,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207173f7c92be784c7b9fd1ac323581028c_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595589099/Put/seqid=0 2024-12-07T18:19:49,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742064_1240 (size=12454) 2024-12-07T18:19:49,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T18:19:49,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:49,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595649167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:49,259 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:49,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:49,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:49,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:49,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:49,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:49,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:49,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595649270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:49,412 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:49,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:49,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:49,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:49,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:49,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:49,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595649473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:49,517 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:49,522 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207173f7c92be784c7b9fd1ac323581028c_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207173f7c92be784c7b9fd1ac323581028c_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:49,523 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/597ecf428d5f4812b22c290ade772412, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:49,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/597ecf428d5f4812b22c290ade772412 is 175, key is test_row_0/A:col10/1733595589099/Put/seqid=0 2024-12-07T18:19:49,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742065_1241 (size=31255) 2024-12-07T18:19:49,550 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=373, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/597ecf428d5f4812b22c290ade772412 2024-12-07T18:19:49,559 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/de3c8399bfef4cd5b04435f71119e4af is 50, key is test_row_0/B:col10/1733595589099/Put/seqid=0 2024-12-07T18:19:49,566 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 
2024-12-07T18:19:49,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:49,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:49,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:49,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:49,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:49,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742066_1242 (size=12301) 2024-12-07T18:19:49,721 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:49,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:49,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:49,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:49,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:49,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:49,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595649779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:49,875 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:49,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:49,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:49,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:49,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:49,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:49,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:49,975 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/de3c8399bfef4cd5b04435f71119e4af 2024-12-07T18:19:49,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/8c4c05cd8042496f8e56dcf0ef02e5c7 is 50, key is test_row_0/C:col10/1733595589099/Put/seqid=0 2024-12-07T18:19:49,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742067_1243 (size=12301) 2024-12-07T18:19:49,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/8c4c05cd8042496f8e56dcf0ef02e5c7 2024-12-07T18:19:49,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/597ecf428d5f4812b22c290ade772412 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/597ecf428d5f4812b22c290ade772412 2024-12-07T18:19:50,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/597ecf428d5f4812b22c290ade772412, entries=150, sequenceid=373, filesize=30.5 K 2024-12-07T18:19:50,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/de3c8399bfef4cd5b04435f71119e4af as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/de3c8399bfef4cd5b04435f71119e4af 2024-12-07T18:19:50,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/de3c8399bfef4cd5b04435f71119e4af, entries=150, sequenceid=373, filesize=12.0 K 2024-12-07T18:19:50,037 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:50,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:50,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 
{event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:50,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:50,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:50,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:50,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:50,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/8c4c05cd8042496f8e56dcf0ef02e5c7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/8c4c05cd8042496f8e56dcf0ef02e5c7 2024-12-07T18:19:50,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:50,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/8c4c05cd8042496f8e56dcf0ef02e5c7, entries=150, sequenceid=373, filesize=12.0 K 2024-12-07T18:19:50,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 4ae54766e0f6f378fecb09a332e653a1 in 949ms, sequenceid=373, compaction requested=true 2024-12-07T18:19:50,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:50,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:50,050 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:50,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:50,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:50,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:50,050 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:50,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:50,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:50,051 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:50,051 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/A is initiating minor compaction (all files) 2024-12-07T18:19:50,051 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/A in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:50,051 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/749c29c126be48a4a818c83c2a8323e4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/14db3b8dbe71435ab9f39c096bbdde27, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/597ecf428d5f4812b22c290ade772412] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=100.8 K 2024-12-07T18:19:50,051 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:50,051 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/749c29c126be48a4a818c83c2a8323e4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/14db3b8dbe71435ab9f39c096bbdde27, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/597ecf428d5f4812b22c290ade772412] 2024-12-07T18:19:50,052 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:50,052 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/B is initiating minor compaction (all files) 2024-12-07T18:19:50,052 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/B in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:50,052 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0298dbf32e5844f58c1556a49d5f5480, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3f0397ef63a9418782ddfbf99166b935, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/de3c8399bfef4cd5b04435f71119e4af] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=36.8 K 2024-12-07T18:19:50,053 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 749c29c126be48a4a818c83c2a8323e4, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733595586562 2024-12-07T18:19:50,053 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 0298dbf32e5844f58c1556a49d5f5480, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733595586562 2024-12-07T18:19:50,054 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f0397ef63a9418782ddfbf99166b935, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733595587268 2024-12-07T18:19:50,054 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14db3b8dbe71435ab9f39c096bbdde27, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733595587268 2024-12-07T18:19:50,055 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting de3c8399bfef4cd5b04435f71119e4af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733595587935 2024-12-07T18:19:50,055 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 597ecf428d5f4812b22c290ade772412, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733595587935 2024-12-07T18:19:50,077 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#B#compaction#201 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:50,078 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/ef77a7e36d214947b2f07d08f0a914da is 50, key is test_row_0/B:col10/1733595589099/Put/seqid=0 2024-12-07T18:19:50,079 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:50,092 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120794e0ea216ebc4a0e8b83f87e6a8c0c73_4ae54766e0f6f378fecb09a332e653a1 store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:50,095 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120794e0ea216ebc4a0e8b83f87e6a8c0c73_4ae54766e0f6f378fecb09a332e653a1, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:50,095 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120794e0ea216ebc4a0e8b83f87e6a8c0c73_4ae54766e0f6f378fecb09a332e653a1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:50,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742068_1244 (size=13153) 2024-12-07T18:19:50,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T18:19:50,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742069_1245 (size=4469) 2024-12-07T18:19:50,192 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:50,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-07T18:19:50,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:50,193 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-07T18:19:50,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:50,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:50,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:50,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:50,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:50,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:50,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412077dd70d712e9c4f31b18d1799ce2adfc6_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595589156/Put/seqid=0 2024-12-07T18:19:50,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742070_1246 (size=12454) 2024-12-07T18:19:50,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:50,233 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412077dd70d712e9c4f31b18d1799ce2adfc6_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412077dd70d712e9c4f31b18d1799ce2adfc6_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:50,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/33a16593e68b4e868bd8ffaf2500b5a5, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:50,236 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/33a16593e68b4e868bd8ffaf2500b5a5 is 175, key is test_row_0/A:col10/1733595589156/Put/seqid=0 2024-12-07T18:19:50,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742071_1247 (size=31255) 2024-12-07T18:19:50,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:50,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:50,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:50,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595650351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:50,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:50,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595650456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:50,521 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/ef77a7e36d214947b2f07d08f0a914da as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ef77a7e36d214947b2f07d08f0a914da 2024-12-07T18:19:50,535 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#A#compaction#202 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:50,536 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/f734ae75e05743aeb943201b0db9a9ab is 175, key is test_row_0/A:col10/1733595589099/Put/seqid=0 2024-12-07T18:19:50,540 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/B of 4ae54766e0f6f378fecb09a332e653a1 into ef77a7e36d214947b2f07d08f0a914da(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
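The flush above covers all 3/3 column families of the region, and the RegionTooBusyException stack traces show the writes arriving through HRegion.put. A minimal sketch of the kind of single-row, multi-family Put such a writer issues is shown below; the row, family and qualifier names (test_row_0, A/B/C, col10) are taken from the log, while the class name, value and table handle are illustrative assumptions.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class AtomicMultiFamilyPut {
  static void writeRow(Table testAcidGuarantees, byte[] value) throws java.io.IOException {
    Put put = new Put(Bytes.toBytes("test_row_0"));
    // All three cells travel in one mutation, so a reader never sees a partially written row.
    put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
    put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
    put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
    // May be rejected with RegionTooBusyException while the memstore is over its limit,
    // as the WARN entries in this log show.
    testAcidGuarantees.put(put);
  }
}

Because all three cells are carried by a single Put to one row, HBase applies them atomically across the families, which is the property the surrounding AcidGuaranteesTestTool writers and readers exercise.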
2024-12-07T18:19:50,540 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:50,540 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/B, priority=13, startTime=1733595590050; duration=0sec 2024-12-07T18:19:50,540 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:50,540 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:B 2024-12-07T18:19:50,540 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:19:50,542 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:19:50,542 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/C is initiating minor compaction (all files) 2024-12-07T18:19:50,542 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/C in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:50,542 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/dc731078d6b24a9c9966268fea54debf, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5ddabdeb9ed6461aab4341ffa5d2f3ed, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/8c4c05cd8042496f8e56dcf0ef02e5c7] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=36.8 K 2024-12-07T18:19:50,542 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting dc731078d6b24a9c9966268fea54debf, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733595586562 2024-12-07T18:19:50,543 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ddabdeb9ed6461aab4341ffa5d2f3ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733595587268 2024-12-07T18:19:50,543 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c4c05cd8042496f8e56dcf0ef02e5c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733595587935 2024-12-07T18:19:50,578 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
4ae54766e0f6f378fecb09a332e653a1#C#compaction#204 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:50,579 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/c2bcf2ae725b474f872c45fc81a2412e is 50, key is test_row_0/C:col10/1733595589099/Put/seqid=0 2024-12-07T18:19:50,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742072_1248 (size=32107) 2024-12-07T18:19:50,591 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/f734ae75e05743aeb943201b0db9a9ab as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f734ae75e05743aeb943201b0db9a9ab 2024-12-07T18:19:50,598 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/A of 4ae54766e0f6f378fecb09a332e653a1 into f734ae75e05743aeb943201b0db9a9ab(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:50,598 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:50,598 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/A, priority=13, startTime=1733595590050; duration=0sec 2024-12-07T18:19:50,599 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:50,599 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:A 2024-12-07T18:19:50,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742073_1249 (size=13153) 2024-12-07T18:19:50,610 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/c2bcf2ae725b474f872c45fc81a2412e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/c2bcf2ae725b474f872c45fc81a2412e 2024-12-07T18:19:50,616 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/C of 4ae54766e0f6f378fecb09a332e653a1 into c2bcf2ae725b474f872c45fc81a2412e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
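While the compactions and the flush drain the memstore, client puts keep being rejected with RegionTooBusyException and retried (the RpcRetryingCallerImpl entries further down report tries=7 of retries=16 after roughly 8 seconds). A small, illustrative sketch of the client-side settings that control that retry budget follows; the property keys are standard HBase client configuration, but the specific values are assumptions and not necessarily what this test run used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientRetryTuning {
  // Returns a connection whose callers retry roughly the way the writers in this log do:
  // a bounded number of attempts with increasing pauses, after which the operation fails.
  public static Connection connect() throws java.io.IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16);          // upper bound on retries per operation
    conf.setLong("hbase.client.pause", 100L);                // base back-off between attempts, in ms
    conf.setLong("hbase.client.operation.timeout", 120000L); // overall time budget per operation, in ms
    return ConnectionFactory.createConnection(conf);
  }
}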
2024-12-07T18:19:50,616 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:50,616 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/C, priority=13, startTime=1733595590050; duration=0sec 2024-12-07T18:19:50,616 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:50,616 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:C 2024-12-07T18:19:50,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:50,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595650659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:50,681 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=395, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/33a16593e68b4e868bd8ffaf2500b5a5 2024-12-07T18:19:50,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/4ef5ce23ae0440c08eb54acea668c165 is 50, key is test_row_0/B:col10/1733595589156/Put/seqid=0 2024-12-07T18:19:50,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742074_1250 (size=12301) 2024-12-07T18:19:50,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:50,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595650965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:51,115 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/4ef5ce23ae0440c08eb54acea668c165 2024-12-07T18:19:51,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/f1cdabab58ae43b38035e356703d3270 is 50, key is test_row_0/C:col10/1733595589156/Put/seqid=0 2024-12-07T18:19:51,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742075_1251 (size=12301) 2024-12-07T18:19:51,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:51,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595651469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:51,572 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/f1cdabab58ae43b38035e356703d3270 2024-12-07T18:19:51,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/33a16593e68b4e868bd8ffaf2500b5a5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/33a16593e68b4e868bd8ffaf2500b5a5 2024-12-07T18:19:51,581 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/33a16593e68b4e868bd8ffaf2500b5a5, entries=150, sequenceid=395, filesize=30.5 K 2024-12-07T18:19:51,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/4ef5ce23ae0440c08eb54acea668c165 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4ef5ce23ae0440c08eb54acea668c165 2024-12-07T18:19:51,587 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4ef5ce23ae0440c08eb54acea668c165, entries=150, sequenceid=395, filesize=12.0 K 2024-12-07T18:19:51,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/f1cdabab58ae43b38035e356703d3270 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1cdabab58ae43b38035e356703d3270 2024-12-07T18:19:51,603 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1cdabab58ae43b38035e356703d3270, entries=150, sequenceid=395, filesize=12.0 K 2024-12-07T18:19:51,604 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 4ae54766e0f6f378fecb09a332e653a1 in 1411ms, sequenceid=395, compaction requested=false 2024-12-07T18:19:51,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:51,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:51,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-07T18:19:51,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-07T18:19:51,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-07T18:19:51,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5800 sec 2024-12-07T18:19:51,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 3.5850 sec 2024-12-07T18:19:52,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T18:19:52,133 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-07T18:19:52,135 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:52,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-07T18:19:52,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-07T18:19:52,138 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:52,139 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:52,139 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:52,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-07T18:19:52,291 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:52,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-07T18:19:52,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:52,292 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-07T18:19:52,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:52,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:52,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:52,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:52,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:52,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:52,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207dba90ca99c2b49c5a02e132e17a6ad1a_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595590290/Put/seqid=0 2024-12-07T18:19:52,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742076_1252 
(size=12454) 2024-12-07T18:19:52,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:52,326 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207dba90ca99c2b49c5a02e132e17a6ad1a_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207dba90ca99c2b49c5a02e132e17a6ad1a_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:52,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/09fcbce105294817b52ca37e366bb76a, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:52,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/09fcbce105294817b52ca37e366bb76a is 175, key is test_row_0/A:col10/1733595590290/Put/seqid=0 2024-12-07T18:19:52,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742077_1253 (size=31255) 2024-12-07T18:19:52,347 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=412, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/09fcbce105294817b52ca37e366bb76a 2024-12-07T18:19:52,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/880f19d41c8a400aa109b4b100fb9214 is 50, key is test_row_0/B:col10/1733595590290/Put/seqid=0 2024-12-07T18:19:52,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742078_1254 (size=12301) 2024-12-07T18:19:52,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-07T18:19:52,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:52,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
as already flushing 2024-12-07T18:19:52,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:52,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595652546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:52,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:52,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595652651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:52,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:52,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53308 deadline: 1733595652696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:52,698 DEBUG [Thread-694 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:52,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53358 deadline: 1733595652708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:52,713 DEBUG [Thread-698 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:52,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:52,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53352 deadline: 1733595652713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:52,715 DEBUG [Thread-702 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8180 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:52,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:52,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53344 deadline: 1733595652725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:52,728 DEBUG [Thread-700 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8196 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:19:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-07T18:19:52,777 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/880f19d41c8a400aa109b4b100fb9214 2024-12-07T18:19:52,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/3da2699f8960404582c5da4fa2c2394e is 50, key is test_row_0/C:col10/1733595590290/Put/seqid=0 2024-12-07T18:19:52,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742079_1255 (size=12301) 2024-12-07T18:19:52,840 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/3da2699f8960404582c5da4fa2c2394e 2024-12-07T18:19:52,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/09fcbce105294817b52ca37e366bb76a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/09fcbce105294817b52ca37e366bb76a 2024-12-07T18:19:52,855 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/09fcbce105294817b52ca37e366bb76a, entries=150, sequenceid=412, filesize=30.5 K 2024-12-07T18:19:52,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/880f19d41c8a400aa109b4b100fb9214 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/880f19d41c8a400aa109b4b100fb9214 2024-12-07T18:19:52,859 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:52,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595652856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:52,861 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/880f19d41c8a400aa109b4b100fb9214, entries=150, sequenceid=412, filesize=12.0 K 2024-12-07T18:19:52,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/3da2699f8960404582c5da4fa2c2394e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/3da2699f8960404582c5da4fa2c2394e 2024-12-07T18:19:52,870 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/3da2699f8960404582c5da4fa2c2394e, entries=150, sequenceid=412, filesize=12.0 K 2024-12-07T18:19:52,871 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 4ae54766e0f6f378fecb09a332e653a1 in 579ms, sequenceid=412, compaction requested=true 
2024-12-07T18:19:52,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:52,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:52,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-07T18:19:52,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-07T18:19:52,874 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-07T18:19:52,874 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 734 msec 2024-12-07T18:19:52,876 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 740 msec 2024-12-07T18:19:53,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:53,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:19:53,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:53,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:53,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:53,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:53,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:53,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:53,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412075ad9655e9cf94647b9c7ff9550f97b6d_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595592545/Put/seqid=0 2024-12-07T18:19:53,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:53,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595653207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:53,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742080_1256 (size=14994) 2024-12-07T18:19:53,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-07T18:19:53,241 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-07T18:19:53,243 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:19:53,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-07T18:19:53,245 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:19:53,246 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:19:53,246 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:19:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-07T18:19:53,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:53,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595653313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-07T18:19:53,399 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:53,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:53,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:53,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:53,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:53,400 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:53,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:53,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:53,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:53,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595653517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:53,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-07T18:19:53,552 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:53,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:53,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:53,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:53,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:53,554 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:53,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:53,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:53,616 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:53,628 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412075ad9655e9cf94647b9c7ff9550f97b6d_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412075ad9655e9cf94647b9c7ff9550f97b6d_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:53,630 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/3041b17d848a4365a0bd59a5d84c1ec8, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:53,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/3041b17d848a4365a0bd59a5d84c1ec8 is 175, key is test_row_0/A:col10/1733595592545/Put/seqid=0 2024-12-07T18:19:53,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742081_1257 (size=39949) 2024-12-07T18:19:53,658 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=435, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/3041b17d848a4365a0bd59a5d84c1ec8 2024-12-07T18:19:53,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/3cbdd032702342fb810834b40fe2deef is 50, key is test_row_0/B:col10/1733595592545/Put/seqid=0 2024-12-07T18:19:53,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:53,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:53,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:53,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
as already flushing 2024-12-07T18:19:53,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:53,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:53,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:53,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:53,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742082_1258 (size=12301) 2024-12-07T18:19:53,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/3cbdd032702342fb810834b40fe2deef 2024-12-07T18:19:53,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/911c48c5509e4b38ab4b9b91e5395114 is 50, key is test_row_0/C:col10/1733595592545/Put/seqid=0 2024-12-07T18:19:53,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742083_1259 (size=12301) 2024-12-07T18:19:53,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/911c48c5509e4b38ab4b9b91e5395114 2024-12-07T18:19:53,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/3041b17d848a4365a0bd59a5d84c1ec8 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/3041b17d848a4365a0bd59a5d84c1ec8 2024-12-07T18:19:53,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/3041b17d848a4365a0bd59a5d84c1ec8, entries=200, sequenceid=435, filesize=39.0 K 2024-12-07T18:19:53,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/3cbdd032702342fb810834b40fe2deef as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3cbdd032702342fb810834b40fe2deef 2024-12-07T18:19:53,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3cbdd032702342fb810834b40fe2deef, entries=150, sequenceid=435, filesize=12.0 K 2024-12-07T18:19:53,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/911c48c5509e4b38ab4b9b91e5395114 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/911c48c5509e4b38ab4b9b91e5395114 2024-12-07T18:19:53,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/911c48c5509e4b38ab4b9b91e5395114, entries=150, sequenceid=435, filesize=12.0 K 2024-12-07T18:19:53,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 4ae54766e0f6f378fecb09a332e653a1 in 644ms, sequenceid=435, compaction requested=true 2024-12-07T18:19:53,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:53,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:19:53,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:53,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:19:53,809 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:53,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:53,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ae54766e0f6f378fecb09a332e653a1:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:19:53,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:53,809 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:53,811 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:53,811 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/B is initiating minor compaction (all files) 2024-12-07T18:19:53,811 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/B in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:53,811 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ef77a7e36d214947b2f07d08f0a914da, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4ef5ce23ae0440c08eb54acea668c165, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/880f19d41c8a400aa109b4b100fb9214, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3cbdd032702342fb810834b40fe2deef] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=48.9 K 2024-12-07T18:19:53,811 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134566 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:53,811 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/A is initiating minor compaction (all files) 2024-12-07T18:19:53,812 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/A in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:19:53,812 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f734ae75e05743aeb943201b0db9a9ab, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/33a16593e68b4e868bd8ffaf2500b5a5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/09fcbce105294817b52ca37e366bb76a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/3041b17d848a4365a0bd59a5d84c1ec8] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=131.4 K 2024-12-07T18:19:53,812 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:53,812 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f734ae75e05743aeb943201b0db9a9ab, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/33a16593e68b4e868bd8ffaf2500b5a5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/09fcbce105294817b52ca37e366bb76a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/3041b17d848a4365a0bd59a5d84c1ec8] 2024-12-07T18:19:53,813 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ef77a7e36d214947b2f07d08f0a914da, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733595587935 2024-12-07T18:19:53,813 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting f734ae75e05743aeb943201b0db9a9ab, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733595587935 2024-12-07T18:19:53,813 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ef5ce23ae0440c08eb54acea668c165, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733595589152 2024-12-07T18:19:53,814 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33a16593e68b4e868bd8ffaf2500b5a5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733595589152 2024-12-07T18:19:53,814 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 880f19d41c8a400aa109b4b100fb9214, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1733595590290 2024-12-07T18:19:53,815 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09fcbce105294817b52ca37e366bb76a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1733595590290 2024-12-07T18:19:53,815 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cbdd032702342fb810834b40fe2deef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733595592541 2024-12-07T18:19:53,815 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3041b17d848a4365a0bd59a5d84c1ec8, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733595592533 2024-12-07T18:19:53,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:53,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-07T18:19:53,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:53,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:53,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:53,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:53,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:53,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:53,840 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:53,846 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#B#compaction#214 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:53,847 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/5ae0281195a3490088ffbebe4fb6271e is 50, key is test_row_0/B:col10/1733595592545/Put/seqid=0 2024-12-07T18:19:53,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-07T18:19:53,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207b92c81191fc6461ea34537f46fab5754_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595593182/Put/seqid=0 2024-12-07T18:19:53,859 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412071e39b099891242d59da27b7b4cfae844_4ae54766e0f6f378fecb09a332e653a1 store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:53,860 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:53,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:53,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:53,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:53,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:53,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:53,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:53,862 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412071e39b099891242d59da27b7b4cfae844_4ae54766e0f6f378fecb09a332e653a1, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:53,862 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412071e39b099891242d59da27b7b4cfae844_4ae54766e0f6f378fecb09a332e653a1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:53,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:53,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:53,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595653905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:53,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742085_1261 (size=14994) 2024-12-07T18:19:53,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742084_1260 (size=13289) 2024-12-07T18:19:53,924 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/5ae0281195a3490088ffbebe4fb6271e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5ae0281195a3490088ffbebe4fb6271e 2024-12-07T18:19:53,931 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/B of 4ae54766e0f6f378fecb09a332e653a1 into 5ae0281195a3490088ffbebe4fb6271e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:53,931 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:53,931 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/B, priority=12, startTime=1733595593809; duration=0sec 2024-12-07T18:19:53,931 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:19:53,931 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:B 2024-12-07T18:19:53,932 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:19:53,933 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:19:53,933 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 4ae54766e0f6f378fecb09a332e653a1/C is initiating minor compaction (all files) 2024-12-07T18:19:53,934 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4ae54766e0f6f378fecb09a332e653a1/C in TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:53,934 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/c2bcf2ae725b474f872c45fc81a2412e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1cdabab58ae43b38035e356703d3270, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/3da2699f8960404582c5da4fa2c2394e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/911c48c5509e4b38ab4b9b91e5395114] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp, totalSize=48.9 K 2024-12-07T18:19:53,934 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting c2bcf2ae725b474f872c45fc81a2412e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733595587935 2024-12-07T18:19:53,935 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting f1cdabab58ae43b38035e356703d3270, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733595589152 2024-12-07T18:19:53,935 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 3da2699f8960404582c5da4fa2c2394e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=412, earliestPutTs=1733595590290 2024-12-07T18:19:53,935 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 911c48c5509e4b38ab4b9b91e5395114, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733595592541 2024-12-07T18:19:53,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742086_1262 (size=4469) 2024-12-07T18:19:53,953 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#C#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:53,954 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/35df5a9074a8497285f9078ccde38b1c is 50, key is test_row_0/C:col10/1733595592545/Put/seqid=0 2024-12-07T18:19:53,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742087_1263 (size=13289) 2024-12-07T18:19:53,969 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/35df5a9074a8497285f9078ccde38b1c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/35df5a9074a8497285f9078ccde38b1c 2024-12-07T18:19:53,975 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/C of 4ae54766e0f6f378fecb09a332e653a1 into 35df5a9074a8497285f9078ccde38b1c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:19:53,975 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:53,975 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/C, priority=12, startTime=1733595593809; duration=0sec 2024-12-07T18:19:53,975 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:53,975 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:C 2024-12-07T18:19:54,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:54,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595654007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:54,014 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:54,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:54,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:54,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,169 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:54,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:54,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:54,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:54,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595654209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:54,313 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:54,318 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207b92c81191fc6461ea34537f46fab5754_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207b92c81191fc6461ea34537f46fab5754_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:54,319 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/f3b73ed35f7343f382b53817384bd533, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:54,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/f3b73ed35f7343f382b53817384bd533 is 175, key is test_row_0/A:col10/1733595593182/Put/seqid=0 2024-12-07T18:19:54,323 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:54,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:54,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
as already flushing 2024-12-07T18:19:54,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742088_1264 (size=39949) 2024-12-07T18:19:54,332 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=449, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/f3b73ed35f7343f382b53817384bd533 2024-12-07T18:19:54,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/0a8686fa8b354b4e872faf064e715f3a is 50, key is test_row_0/B:col10/1733595593182/Put/seqid=0 2024-12-07T18:19:54,345 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ae54766e0f6f378fecb09a332e653a1#A#compaction#213 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:19:54,345 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/662f8a76fb434edb9fb4c9035b3c6223 is 175, key is test_row_0/A:col10/1733595592545/Put/seqid=0 2024-12-07T18:19:54,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742089_1265 (size=12301) 2024-12-07T18:19:54,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-07T18:19:54,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742090_1266 (size=32243) 2024-12-07T18:19:54,368 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/662f8a76fb434edb9fb4c9035b3c6223 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/662f8a76fb434edb9fb4c9035b3c6223 2024-12-07T18:19:54,376 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4ae54766e0f6f378fecb09a332e653a1/A of 4ae54766e0f6f378fecb09a332e653a1 into 662f8a76fb434edb9fb4c9035b3c6223(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:19:54,377 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:54,377 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1., storeName=4ae54766e0f6f378fecb09a332e653a1/A, priority=12, startTime=1733595593809; duration=0sec 2024-12-07T18:19:54,377 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:19:54,377 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ae54766e0f6f378fecb09a332e653a1:A 2024-12-07T18:19:54,379 DEBUG [Thread-711 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77b8b9d2 to 127.0.0.1:56016 2024-12-07T18:19:54,379 DEBUG [Thread-711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:54,381 DEBUG [Thread-705 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c645fa1 to 127.0.0.1:56016 2024-12-07T18:19:54,381 DEBUG [Thread-705 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:54,381 DEBUG [Thread-707 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c317ae0 to 127.0.0.1:56016 2024-12-07T18:19:54,381 DEBUG [Thread-707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:54,382 DEBUG [Thread-709 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a0fc918 to 127.0.0.1:56016 2024-12-07T18:19:54,382 DEBUG [Thread-709 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:54,477 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:54,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:54,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:54,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,478 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:54,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595654513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:54,631 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:54,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:54,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:54,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:54,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/0a8686fa8b354b4e872faf064e715f3a 2024-12-07T18:19:54,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/fd327b3ad76349a5a2103820264b9f3f is 50, key is test_row_0/C:col10/1733595593182/Put/seqid=0 2024-12-07T18:19:54,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742091_1267 (size=12301) 2024-12-07T18:19:54,784 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:54,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:54,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:54,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:54,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,936 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:54,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:54,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:54,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:54,937 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:54,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:55,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:19:55,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53324 deadline: 1733595655015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:19:55,089 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:19:55,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:55,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:55,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:55,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:55,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:19:55,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:55,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:19:55,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/fd327b3ad76349a5a2103820264b9f3f 2024-12-07T18:19:55,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/f3b73ed35f7343f382b53817384bd533 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f3b73ed35f7343f382b53817384bd533 2024-12-07T18:19:55,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f3b73ed35f7343f382b53817384bd533, entries=200, sequenceid=449, filesize=39.0 K 2024-12-07T18:19:55,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/0a8686fa8b354b4e872faf064e715f3a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0a8686fa8b354b4e872faf064e715f3a 2024-12-07T18:19:55,175 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0a8686fa8b354b4e872faf064e715f3a, entries=150, sequenceid=449, filesize=12.0 K 2024-12-07T18:19:55,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/fd327b3ad76349a5a2103820264b9f3f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/fd327b3ad76349a5a2103820264b9f3f 2024-12-07T18:19:55,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/fd327b3ad76349a5a2103820264b9f3f, entries=150, sequenceid=449, filesize=12.0 K 2024-12-07T18:19:55,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 4ae54766e0f6f378fecb09a332e653a1 in 1355ms, sequenceid=449, compaction requested=false 2024-12-07T18:19:55,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:55,242 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
8a7a030b35db,45237,1733595542335 2024-12-07T18:19:55,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-07T18:19:55,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:55,243 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-07T18:19:55,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:19:55,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:55,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:19:55,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:55,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:19:55,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:19:55,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207615ef77d37504b55987e26a026562308_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595593904/Put/seqid=0 2024-12-07T18:19:55,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742092_1268 (size=12454) 2024-12-07T18:19:55,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-07T18:19:55,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:19:55,657 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207615ef77d37504b55987e26a026562308_4ae54766e0f6f378fecb09a332e653a1 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207615ef77d37504b55987e26a026562308_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:55,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4d2bccdfdd1e4db69b6b5f47649968b3, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:19:55,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4d2bccdfdd1e4db69b6b5f47649968b3 is 175, key is test_row_0/A:col10/1733595593904/Put/seqid=0 2024-12-07T18:19:55,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742093_1269 (size=31255) 2024-12-07T18:19:56,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:19:56,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. as already flushing 2024-12-07T18:19:56,026 DEBUG [Thread-696 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ad21927 to 127.0.0.1:56016 2024-12-07T18:19:56,027 DEBUG [Thread-696 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:19:56,063 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=474, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4d2bccdfdd1e4db69b6b5f47649968b3 2024-12-07T18:19:56,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/2e50767aa8804c79841078e875e4c0c7 is 50, key is test_row_0/B:col10/1733595593904/Put/seqid=0 2024-12-07T18:19:56,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742094_1270 (size=12301) 2024-12-07T18:19:56,478 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/2e50767aa8804c79841078e875e4c0c7 2024-12-07T18:19:56,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/dcef9067b02c4f8199968bde5b6ddf9d is 50, key is test_row_0/C:col10/1733595593904/Put/seqid=0 2024-12-07T18:19:56,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742095_1271 (size=12301) 2024-12-07T18:19:56,890 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/dcef9067b02c4f8199968bde5b6ddf9d 2024-12-07T18:19:56,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/4d2bccdfdd1e4db69b6b5f47649968b3 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4d2bccdfdd1e4db69b6b5f47649968b3 2024-12-07T18:19:56,899 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4d2bccdfdd1e4db69b6b5f47649968b3, entries=150, sequenceid=474, filesize=30.5 K 2024-12-07T18:19:56,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/2e50767aa8804c79841078e875e4c0c7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/2e50767aa8804c79841078e875e4c0c7 2024-12-07T18:19:56,904 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/2e50767aa8804c79841078e875e4c0c7, entries=150, sequenceid=474, filesize=12.0 K 2024-12-07T18:19:56,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/dcef9067b02c4f8199968bde5b6ddf9d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/dcef9067b02c4f8199968bde5b6ddf9d 2024-12-07T18:19:56,908 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/dcef9067b02c4f8199968bde5b6ddf9d, entries=150, sequenceid=474, filesize=12.0 K 2024-12-07T18:19:56,909 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=6.71 KB/6870 for 4ae54766e0f6f378fecb09a332e653a1 in 1666ms, sequenceid=474, compaction requested=true 2024-12-07T18:19:56,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:19:56,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:19:56,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-07T18:19:56,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-07T18:19:56,911 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-07T18:19:56,911 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6640 sec 2024-12-07T18:19:56,912 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 3.6690 sec 2024-12-07T18:19:57,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-07T18:19:57,361 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-07T18:20:00,639 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T18:20:02,731 DEBUG [Thread-694 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a3d7b93 to 127.0.0.1:56016 2024-12-07T18:20:02,731 DEBUG [Thread-694 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:02,740 DEBUG [Thread-700 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2939e0db to 127.0.0.1:56016 2024-12-07T18:20:02,740 DEBUG [Thread-700 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:02,788 DEBUG [Thread-698 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40e8ce40 to 127.0.0.1:56016 2024-12-07T18:20:02,788 DEBUG [Thread-698 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:02,813 DEBUG [Thread-702 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2362c8ba to 127.0.0.1:56016 2024-12-07T18:20:02,813 DEBUG [Thread-702 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 151
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4926
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4703
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2131
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6390 rows
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2169
2024-12-07T18:20:02,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6507 rows
2024-12-07T18:20:02,814 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-07T18:20:02,814 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x415dec94 to 127.0.0.1:56016
2024-12-07T18:20:02,814 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-07T18:20:02,816 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-07T18:20:02,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-07T18:20:02,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-07T18:20:02,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61
2024-12-07T18:20:02,820 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595602819"}]},"ts":"1733595602819"}
2024-12-07T18:20:02,821 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-07T18:20:02,825 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-07T18:20:02,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-07T18:20:02,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4ae54766e0f6f378fecb09a332e653a1, UNASSIGN}]
2024-12-07T18:20:02,827 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure
table=TestAcidGuarantees, region=4ae54766e0f6f378fecb09a332e653a1, UNASSIGN 2024-12-07T18:20:02,828 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=4ae54766e0f6f378fecb09a332e653a1, regionState=CLOSING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:02,828 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T18:20:02,828 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; CloseRegionProcedure 4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:20:02,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-07T18:20:02,980 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:02,980 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(124): Close 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:02,980 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T18:20:02,980 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1681): Closing 4ae54766e0f6f378fecb09a332e653a1, disabling compactions & flushes 2024-12-07T18:20:02,980 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:20:02,981 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:20:02,981 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. after waiting 0 ms 2024-12-07T18:20:02,981 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 
2024-12-07T18:20:02,981 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(2837): Flushing 4ae54766e0f6f378fecb09a332e653a1 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-07T18:20:02,981 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=A 2024-12-07T18:20:02,981 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:02,981 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=B 2024-12-07T18:20:02,981 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:02,981 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4ae54766e0f6f378fecb09a332e653a1, store=C 2024-12-07T18:20:02,981 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:02,987 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207e8d84528e97e4a1a9578d7e1c82c73ee_4ae54766e0f6f378fecb09a332e653a1 is 50, key is test_row_0/A:col10/1733595602812/Put/seqid=0 2024-12-07T18:20:02,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742096_1272 (size=9914) 2024-12-07T18:20:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-07T18:20:03,392 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:03,396 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207e8d84528e97e4a1a9578d7e1c82c73ee_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207e8d84528e97e4a1a9578d7e1c82c73ee_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:03,397 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/d1038dc88b154b048307d2c33464694e, store: [table=TestAcidGuarantees family=A region=4ae54766e0f6f378fecb09a332e653a1] 2024-12-07T18:20:03,397 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/d1038dc88b154b048307d2c33464694e is 175, key is test_row_0/A:col10/1733595602812/Put/seqid=0 2024-12-07T18:20:03,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742097_1273 (size=22561) 2024-12-07T18:20:03,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-07T18:20:03,802 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=482, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/d1038dc88b154b048307d2c33464694e 2024-12-07T18:20:03,809 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/6500a8fdc58541eabf942af616f5a57f is 50, key is test_row_0/B:col10/1733595602812/Put/seqid=0 2024-12-07T18:20:03,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742098_1274 (size=9857) 2024-12-07T18:20:03,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-07T18:20:04,213 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=482 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/6500a8fdc58541eabf942af616f5a57f 2024-12-07T18:20:04,221 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/fb32d6fb4c3946a38b63f2c077fcfef4 is 50, key is test_row_0/C:col10/1733595602812/Put/seqid=0 2024-12-07T18:20:04,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742099_1275 (size=9857) 2024-12-07T18:20:04,625 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=482 (bloomFilter=true), 
to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/fb32d6fb4c3946a38b63f2c077fcfef4 2024-12-07T18:20:04,630 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/A/d1038dc88b154b048307d2c33464694e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/d1038dc88b154b048307d2c33464694e 2024-12-07T18:20:04,634 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/d1038dc88b154b048307d2c33464694e, entries=100, sequenceid=482, filesize=22.0 K 2024-12-07T18:20:04,634 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/B/6500a8fdc58541eabf942af616f5a57f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/6500a8fdc58541eabf942af616f5a57f 2024-12-07T18:20:04,638 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/6500a8fdc58541eabf942af616f5a57f, entries=100, sequenceid=482, filesize=9.6 K 2024-12-07T18:20:04,639 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/.tmp/C/fb32d6fb4c3946a38b63f2c077fcfef4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/fb32d6fb4c3946a38b63f2c077fcfef4 2024-12-07T18:20:04,643 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/fb32d6fb4c3946a38b63f2c077fcfef4, entries=100, sequenceid=482, filesize=9.6 K 2024-12-07T18:20:04,643 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 4ae54766e0f6f378fecb09a332e653a1 in 1662ms, sequenceid=482, compaction requested=true 2024-12-07T18:20:04,644 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/9ce965c9b0aa4163984e8af9f8adad52, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fadf68f3039447c6a6a89cde73632520, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/ef6ecc99bc8847bdadf0f7ec31c1fa6c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/bab7b85167c245f180300dbf7f57867f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/589a052534d34b299ca515917d0f56ee, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fbdd0cc6894d4807a80cdf561ad95f29, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/40dda9c55de2452482ed675acb8b9a86, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c15925fe7ff049c2870428d99f929a25, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/42f1b9cd81754c30bb1f2cc604c50c3a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/74dd3af267e947f890c51b49b330f2f9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/0e6d756b419a45478e1088d52cc44c50, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/d9b9280c387b495f8e1f922530c2929c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/276eb85ea44949c2a519695f98858131, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4b1c15e62e3842f9a95b96c1dee95d26, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/eb3dfe4b6768495c8ae24f77b641da3e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/cf4fc47b494941a0817faded6801498b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/32484b564aca4ed5a5d7855e72f405a7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f2a83c7b40da423197e05607c43490ee, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c37179a16b044845aaf7f9e2502361ac, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/5ab8f92fbb284fb2acb8e039767b2ff2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/faefc5e6132545faab80ecbadd638aa8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4f4b6dbd74f045c7a0645d7a2862ae18, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/749c29c126be48a4a818c83c2a8323e4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/14db3b8dbe71435ab9f39c096bbdde27, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f734ae75e05743aeb943201b0db9a9ab, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/597ecf428d5f4812b22c290ade772412, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/33a16593e68b4e868bd8ffaf2500b5a5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/09fcbce105294817b52ca37e366bb76a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/3041b17d848a4365a0bd59a5d84c1ec8] to archive 2024-12-07T18:20:04,645 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
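[Editor's note] Each HFileArchiver entry that follows records one compacted store file being moved from the region's column-family directory to the matching path under the cluster's archive directory. A minimal illustration of that source-to-destination mapping, using paths taken from the log; the method and class names are hypothetical, and the real logic lives in org.apache.hadoop.hbase.backup.HFileArchiver:

public class ArchivePathDemo {
    // Sketch only: reproduces the path pattern seen in the entries below, not HBase's actual code.
    // Assumes storeFilePath starts with rootDir and rootDir has no trailing slash.
    static String archivePathFor(String rootDir, String storeFilePath) {
        // <root>/data/default/<table>/<region>/<family>/<hfile>
        //   -> <root>/archive/data/default/<table>/<region>/<family>/<hfile>
        String relative = storeFilePath.substring(rootDir.length());
        return rootDir + "/archive" + relative;
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7";
        String src = root + "/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/9ce965c9b0aa4163984e8af9f8adad52";
        // Prints the same destination the first "Archived from FileableStoreFile" entry below reports.
        System.out.println(archivePathFor(root, src));
    }
}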
2024-12-07T18:20:04,647 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/9ce965c9b0aa4163984e8af9f8adad52 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/9ce965c9b0aa4163984e8af9f8adad52 2024-12-07T18:20:04,648 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fadf68f3039447c6a6a89cde73632520 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fadf68f3039447c6a6a89cde73632520 2024-12-07T18:20:04,649 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/ef6ecc99bc8847bdadf0f7ec31c1fa6c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/ef6ecc99bc8847bdadf0f7ec31c1fa6c 2024-12-07T18:20:04,650 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/bab7b85167c245f180300dbf7f57867f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/bab7b85167c245f180300dbf7f57867f 2024-12-07T18:20:04,651 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/589a052534d34b299ca515917d0f56ee to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/589a052534d34b299ca515917d0f56ee 2024-12-07T18:20:04,653 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fbdd0cc6894d4807a80cdf561ad95f29 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/fbdd0cc6894d4807a80cdf561ad95f29 2024-12-07T18:20:04,654 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/40dda9c55de2452482ed675acb8b9a86 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/40dda9c55de2452482ed675acb8b9a86 2024-12-07T18:20:04,655 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c15925fe7ff049c2870428d99f929a25 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c15925fe7ff049c2870428d99f929a25 2024-12-07T18:20:04,656 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/42f1b9cd81754c30bb1f2cc604c50c3a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/42f1b9cd81754c30bb1f2cc604c50c3a 2024-12-07T18:20:04,657 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/74dd3af267e947f890c51b49b330f2f9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/74dd3af267e947f890c51b49b330f2f9 2024-12-07T18:20:04,658 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/0e6d756b419a45478e1088d52cc44c50 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/0e6d756b419a45478e1088d52cc44c50 2024-12-07T18:20:04,660 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/d9b9280c387b495f8e1f922530c2929c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/d9b9280c387b495f8e1f922530c2929c 2024-12-07T18:20:04,661 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/276eb85ea44949c2a519695f98858131 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/276eb85ea44949c2a519695f98858131 2024-12-07T18:20:04,662 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4b1c15e62e3842f9a95b96c1dee95d26 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4b1c15e62e3842f9a95b96c1dee95d26 2024-12-07T18:20:04,664 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/eb3dfe4b6768495c8ae24f77b641da3e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/eb3dfe4b6768495c8ae24f77b641da3e 2024-12-07T18:20:04,665 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/cf4fc47b494941a0817faded6801498b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/cf4fc47b494941a0817faded6801498b 2024-12-07T18:20:04,666 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/32484b564aca4ed5a5d7855e72f405a7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/32484b564aca4ed5a5d7855e72f405a7 2024-12-07T18:20:04,667 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f2a83c7b40da423197e05607c43490ee to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f2a83c7b40da423197e05607c43490ee 2024-12-07T18:20:04,669 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c37179a16b044845aaf7f9e2502361ac to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/c37179a16b044845aaf7f9e2502361ac 2024-12-07T18:20:04,670 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/5ab8f92fbb284fb2acb8e039767b2ff2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/5ab8f92fbb284fb2acb8e039767b2ff2 2024-12-07T18:20:04,671 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/faefc5e6132545faab80ecbadd638aa8 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/faefc5e6132545faab80ecbadd638aa8 2024-12-07T18:20:04,672 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4f4b6dbd74f045c7a0645d7a2862ae18 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4f4b6dbd74f045c7a0645d7a2862ae18 2024-12-07T18:20:04,673 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/749c29c126be48a4a818c83c2a8323e4 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/749c29c126be48a4a818c83c2a8323e4 2024-12-07T18:20:04,674 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/14db3b8dbe71435ab9f39c096bbdde27 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/14db3b8dbe71435ab9f39c096bbdde27 2024-12-07T18:20:04,675 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f734ae75e05743aeb943201b0db9a9ab to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f734ae75e05743aeb943201b0db9a9ab 2024-12-07T18:20:04,676 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/597ecf428d5f4812b22c290ade772412 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/597ecf428d5f4812b22c290ade772412 2024-12-07T18:20:04,677 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/33a16593e68b4e868bd8ffaf2500b5a5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/33a16593e68b4e868bd8ffaf2500b5a5 2024-12-07T18:20:04,678 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/09fcbce105294817b52ca37e366bb76a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/09fcbce105294817b52ca37e366bb76a 2024-12-07T18:20:04,680 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/3041b17d848a4365a0bd59a5d84c1ec8 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/3041b17d848a4365a0bd59a5d84c1ec8 2024-12-07T18:20:04,681 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/86b3d737efab4957ba598f764e664bc7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8131bb9b1dca43de92196a06b93e384e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/6ace8aa543f346c080ea9387677bbeea, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/cb3867eae18141e1afa1a6a1365d90c7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5e179bf2fde5440d8595f44f179f9e66, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/a8c064037ff245ed9fbbe614f2c37bbb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/1e8dd39998ab450cbf2745fa9a4df690, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/b936f595824b4da086a37a4781c39a02, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/49532613318b4eaeb20d83c7530a1b3c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4a48a6202778478bb093390c6918868e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/056572ae79fb4b8a9f9993afd77d8e8c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ac86bddc4f8d443d940c6fbd95f8b5e9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0b96654623554e5085a2749c90f49893, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5d4fccb1cb9b41058029e06f701aa631, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/588359da84344ca288f716db24c9805d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/f093cfc24bd6490bacb292f33da9f877, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/e831ac94a4d84906a52e96fdd36a3f81, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ab25eccf858e43d4884e55c2ad88b462, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0004910ed37740fe93265c3737bc22b1, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/cdd47eba13964154ac8e092bbfa2747d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8e85fb42d6e9420caf53a455b53ef54b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0298dbf32e5844f58c1556a49d5f5480, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/65543bd0c0f04365b7a9551e9f1f1dfa, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3f0397ef63a9418782ddfbf99166b935, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ef77a7e36d214947b2f07d08f0a914da, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/de3c8399bfef4cd5b04435f71119e4af, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4ef5ce23ae0440c08eb54acea668c165, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/880f19d41c8a400aa109b4b100fb9214, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3cbdd032702342fb810834b40fe2deef] to archive 2024-12-07T18:20:04,683 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:20:04,685 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/86b3d737efab4957ba598f764e664bc7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/86b3d737efab4957ba598f764e664bc7 2024-12-07T18:20:04,686 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8131bb9b1dca43de92196a06b93e384e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8131bb9b1dca43de92196a06b93e384e 2024-12-07T18:20:04,687 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/6ace8aa543f346c080ea9387677bbeea to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/6ace8aa543f346c080ea9387677bbeea 2024-12-07T18:20:04,689 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/cb3867eae18141e1afa1a6a1365d90c7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/cb3867eae18141e1afa1a6a1365d90c7 2024-12-07T18:20:04,690 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5e179bf2fde5440d8595f44f179f9e66 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5e179bf2fde5440d8595f44f179f9e66 2024-12-07T18:20:04,691 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/a8c064037ff245ed9fbbe614f2c37bbb to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/a8c064037ff245ed9fbbe614f2c37bbb 2024-12-07T18:20:04,693 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/1e8dd39998ab450cbf2745fa9a4df690 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/1e8dd39998ab450cbf2745fa9a4df690 2024-12-07T18:20:04,694 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/b936f595824b4da086a37a4781c39a02 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/b936f595824b4da086a37a4781c39a02 2024-12-07T18:20:04,695 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/49532613318b4eaeb20d83c7530a1b3c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/49532613318b4eaeb20d83c7530a1b3c 2024-12-07T18:20:04,696 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4a48a6202778478bb093390c6918868e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4a48a6202778478bb093390c6918868e 2024-12-07T18:20:04,697 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/056572ae79fb4b8a9f9993afd77d8e8c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/056572ae79fb4b8a9f9993afd77d8e8c 2024-12-07T18:20:04,699 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ac86bddc4f8d443d940c6fbd95f8b5e9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ac86bddc4f8d443d940c6fbd95f8b5e9 2024-12-07T18:20:04,700 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0b96654623554e5085a2749c90f49893 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0b96654623554e5085a2749c90f49893 2024-12-07T18:20:04,701 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5d4fccb1cb9b41058029e06f701aa631 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5d4fccb1cb9b41058029e06f701aa631 2024-12-07T18:20:04,702 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/588359da84344ca288f716db24c9805d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/588359da84344ca288f716db24c9805d 2024-12-07T18:20:04,703 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/f093cfc24bd6490bacb292f33da9f877 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/f093cfc24bd6490bacb292f33da9f877 2024-12-07T18:20:04,705 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/e831ac94a4d84906a52e96fdd36a3f81 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/e831ac94a4d84906a52e96fdd36a3f81 2024-12-07T18:20:04,706 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ab25eccf858e43d4884e55c2ad88b462 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ab25eccf858e43d4884e55c2ad88b462 2024-12-07T18:20:04,707 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0004910ed37740fe93265c3737bc22b1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0004910ed37740fe93265c3737bc22b1 2024-12-07T18:20:04,708 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/cdd47eba13964154ac8e092bbfa2747d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/cdd47eba13964154ac8e092bbfa2747d 2024-12-07T18:20:04,709 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8e85fb42d6e9420caf53a455b53ef54b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/8e85fb42d6e9420caf53a455b53ef54b 2024-12-07T18:20:04,710 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0298dbf32e5844f58c1556a49d5f5480 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0298dbf32e5844f58c1556a49d5f5480 2024-12-07T18:20:04,711 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/65543bd0c0f04365b7a9551e9f1f1dfa to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/65543bd0c0f04365b7a9551e9f1f1dfa 2024-12-07T18:20:04,712 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3f0397ef63a9418782ddfbf99166b935 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3f0397ef63a9418782ddfbf99166b935 2024-12-07T18:20:04,713 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ef77a7e36d214947b2f07d08f0a914da to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/ef77a7e36d214947b2f07d08f0a914da 2024-12-07T18:20:04,715 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/de3c8399bfef4cd5b04435f71119e4af to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/de3c8399bfef4cd5b04435f71119e4af 2024-12-07T18:20:04,716 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4ef5ce23ae0440c08eb54acea668c165 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/4ef5ce23ae0440c08eb54acea668c165 2024-12-07T18:20:04,717 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/880f19d41c8a400aa109b4b100fb9214 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/880f19d41c8a400aa109b4b100fb9214 2024-12-07T18:20:04,718 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3cbdd032702342fb810834b40fe2deef to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/3cbdd032702342fb810834b40fe2deef 2024-12-07T18:20:04,720 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/493c7ff6218e4223ab6ed5f45faac508, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2dc8fba10c414cc0bc3980ac48497652, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/73557ff8c42d46788f61c5104c46c34d, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/b68ed9fff6f64ee7a5e72daf9f40abd4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2fc46eee48374c709b487a6c613e7342, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/6dcfb782838847c59a2c1226801d5736, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1db28cdfecf4820be00661934868a77, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/a12e05be9f0c4fb9a9aea4d855994cfb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5d0fe2bf9f2b4f919a5ee7f1954d24cd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/14fd06169b554e0591ca17f1c40943ee, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/e733b83fae77492db0f72773e1139a56, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/35b626b584dc4ac8a1fb6fb1c285e106, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7c523bec2cd2429ba581149ed3c6e7b7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/441e8422c4654ae9be5a161f8fa6a64d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2a2eb038a5e14cf4825f30b8295e46f7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/d284afe35ba34a49beba9ede21e47f2f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/1c6ecd89f1d042a08488c99ee995dc3b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/d784bf1330f54d90bfed86efdd3c2361, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/acff15df6d874ce0b2ea3cba4f9aad47, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/09ae03d76e9a4607aeb7d60f66f13bf6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7df6fd6915274cdfbc88a7d5aa35ca34, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/dc731078d6b24a9c9966268fea54debf, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/070796035a3b4c3e94e6cde865577917, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5ddabdeb9ed6461aab4341ffa5d2f3ed, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/c2bcf2ae725b474f872c45fc81a2412e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/8c4c05cd8042496f8e56dcf0ef02e5c7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1cdabab58ae43b38035e356703d3270, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/3da2699f8960404582c5da4fa2c2394e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/911c48c5509e4b38ab4b9b91e5395114] to archive 2024-12-07T18:20:04,721 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:20:04,722 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/493c7ff6218e4223ab6ed5f45faac508 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/493c7ff6218e4223ab6ed5f45faac508 2024-12-07T18:20:04,724 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2dc8fba10c414cc0bc3980ac48497652 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2dc8fba10c414cc0bc3980ac48497652 2024-12-07T18:20:04,725 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/73557ff8c42d46788f61c5104c46c34d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/73557ff8c42d46788f61c5104c46c34d 2024-12-07T18:20:04,726 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/b68ed9fff6f64ee7a5e72daf9f40abd4 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/b68ed9fff6f64ee7a5e72daf9f40abd4 2024-12-07T18:20:04,727 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2fc46eee48374c709b487a6c613e7342 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2fc46eee48374c709b487a6c613e7342 2024-12-07T18:20:04,728 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/6dcfb782838847c59a2c1226801d5736 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/6dcfb782838847c59a2c1226801d5736 2024-12-07T18:20:04,729 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1db28cdfecf4820be00661934868a77 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1db28cdfecf4820be00661934868a77 2024-12-07T18:20:04,731 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/a12e05be9f0c4fb9a9aea4d855994cfb to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/a12e05be9f0c4fb9a9aea4d855994cfb 2024-12-07T18:20:04,732 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5d0fe2bf9f2b4f919a5ee7f1954d24cd to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5d0fe2bf9f2b4f919a5ee7f1954d24cd 2024-12-07T18:20:04,733 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/14fd06169b554e0591ca17f1c40943ee to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/14fd06169b554e0591ca17f1c40943ee 2024-12-07T18:20:04,734 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/e733b83fae77492db0f72773e1139a56 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/e733b83fae77492db0f72773e1139a56 2024-12-07T18:20:04,735 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/35b626b584dc4ac8a1fb6fb1c285e106 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/35b626b584dc4ac8a1fb6fb1c285e106 2024-12-07T18:20:04,736 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7c523bec2cd2429ba581149ed3c6e7b7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7c523bec2cd2429ba581149ed3c6e7b7 2024-12-07T18:20:04,737 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/441e8422c4654ae9be5a161f8fa6a64d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/441e8422c4654ae9be5a161f8fa6a64d 2024-12-07T18:20:04,738 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2a2eb038a5e14cf4825f30b8295e46f7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/2a2eb038a5e14cf4825f30b8295e46f7 2024-12-07T18:20:04,739 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/d284afe35ba34a49beba9ede21e47f2f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/d284afe35ba34a49beba9ede21e47f2f 2024-12-07T18:20:04,741 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/1c6ecd89f1d042a08488c99ee995dc3b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/1c6ecd89f1d042a08488c99ee995dc3b 2024-12-07T18:20:04,742 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/d784bf1330f54d90bfed86efdd3c2361 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/d784bf1330f54d90bfed86efdd3c2361 2024-12-07T18:20:04,743 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/acff15df6d874ce0b2ea3cba4f9aad47 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/acff15df6d874ce0b2ea3cba4f9aad47 2024-12-07T18:20:04,744 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/09ae03d76e9a4607aeb7d60f66f13bf6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/09ae03d76e9a4607aeb7d60f66f13bf6 2024-12-07T18:20:04,745 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7df6fd6915274cdfbc88a7d5aa35ca34 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/7df6fd6915274cdfbc88a7d5aa35ca34 2024-12-07T18:20:04,746 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/dc731078d6b24a9c9966268fea54debf to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/dc731078d6b24a9c9966268fea54debf 2024-12-07T18:20:04,747 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/070796035a3b4c3e94e6cde865577917 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/070796035a3b4c3e94e6cde865577917 2024-12-07T18:20:04,748 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5ddabdeb9ed6461aab4341ffa5d2f3ed to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/5ddabdeb9ed6461aab4341ffa5d2f3ed 2024-12-07T18:20:04,750 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/c2bcf2ae725b474f872c45fc81a2412e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/c2bcf2ae725b474f872c45fc81a2412e 2024-12-07T18:20:04,751 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/8c4c05cd8042496f8e56dcf0ef02e5c7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/8c4c05cd8042496f8e56dcf0ef02e5c7 2024-12-07T18:20:04,752 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1cdabab58ae43b38035e356703d3270 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/f1cdabab58ae43b38035e356703d3270 2024-12-07T18:20:04,753 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/3da2699f8960404582c5da4fa2c2394e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/3da2699f8960404582c5da4fa2c2394e 2024-12-07T18:20:04,754 DEBUG [StoreCloser-TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/911c48c5509e4b38ab4b9b91e5395114 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/911c48c5509e4b38ab4b9b91e5395114 2024-12-07T18:20:04,759 DEBUG 
[RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/recovered.edits/485.seqid, newMaxSeqId=485, maxSeqId=4 2024-12-07T18:20:04,759 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1. 2024-12-07T18:20:04,759 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1635): Region close journal for 4ae54766e0f6f378fecb09a332e653a1: 2024-12-07T18:20:04,761 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(170): Closed 4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,761 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=4ae54766e0f6f378fecb09a332e653a1, regionState=CLOSED 2024-12-07T18:20:04,763 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-07T18:20:04,763 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseRegionProcedure 4ae54766e0f6f378fecb09a332e653a1, server=8a7a030b35db,45237,1733595542335 in 1.9340 sec 2024-12-07T18:20:04,765 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=63, resume processing ppid=62 2024-12-07T18:20:04,765 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, ppid=62, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4ae54766e0f6f378fecb09a332e653a1, UNASSIGN in 1.9370 sec 2024-12-07T18:20:04,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-07T18:20:04,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9400 sec 2024-12-07T18:20:04,767 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595604767"}]},"ts":"1733595604767"} 2024-12-07T18:20:04,768 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-07T18:20:04,770 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-07T18:20:04,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9550 sec 2024-12-07T18:20:04,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-07T18:20:04,923 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-07T18:20:04,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-07T18:20:04,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:04,925 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:04,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-07T18:20:04,926 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=65, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:04,928 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,930 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/recovered.edits] 2024-12-07T18:20:04,933 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4d2bccdfdd1e4db69b6b5f47649968b3 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/4d2bccdfdd1e4db69b6b5f47649968b3 2024-12-07T18:20:04,934 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/662f8a76fb434edb9fb4c9035b3c6223 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/662f8a76fb434edb9fb4c9035b3c6223 2024-12-07T18:20:04,935 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/d1038dc88b154b048307d2c33464694e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/d1038dc88b154b048307d2c33464694e 2024-12-07T18:20:04,937 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f3b73ed35f7343f382b53817384bd533 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/A/f3b73ed35f7343f382b53817384bd533 2024-12-07T18:20:04,939 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0a8686fa8b354b4e872faf064e715f3a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/0a8686fa8b354b4e872faf064e715f3a 2024-12-07T18:20:04,940 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/2e50767aa8804c79841078e875e4c0c7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/2e50767aa8804c79841078e875e4c0c7 2024-12-07T18:20:04,941 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5ae0281195a3490088ffbebe4fb6271e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/5ae0281195a3490088ffbebe4fb6271e 2024-12-07T18:20:04,942 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/6500a8fdc58541eabf942af616f5a57f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/B/6500a8fdc58541eabf942af616f5a57f 2024-12-07T18:20:04,944 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/35df5a9074a8497285f9078ccde38b1c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/35df5a9074a8497285f9078ccde38b1c 2024-12-07T18:20:04,945 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/dcef9067b02c4f8199968bde5b6ddf9d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/dcef9067b02c4f8199968bde5b6ddf9d 2024-12-07T18:20:04,946 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/fb32d6fb4c3946a38b63f2c077fcfef4 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/fb32d6fb4c3946a38b63f2c077fcfef4 2024-12-07T18:20:04,947 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/fd327b3ad76349a5a2103820264b9f3f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/C/fd327b3ad76349a5a2103820264b9f3f 2024-12-07T18:20:04,950 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/recovered.edits/485.seqid to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1/recovered.edits/485.seqid 2024-12-07T18:20:04,950 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,951 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-07T18:20:04,951 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-07T18:20:04,952 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-07T18:20:04,957 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120704e150f6ab0d476986690be043840528_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120704e150f6ab0d476986690be043840528_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,958 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207173f7c92be784c7b9fd1ac323581028c_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207173f7c92be784c7b9fd1ac323581028c_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,959 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412071eb0775ab762403cb481eb3d660327a2_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412071eb0775ab762403cb481eb3d660327a2_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,960 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207215ce0406f3e4d12b024298d0a43789a_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207215ce0406f3e4d12b024298d0a43789a_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,962 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412072c64f3507b4d404d929dbdadd5737c0f_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412072c64f3507b4d404d929dbdadd5737c0f_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,963 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412072eb0bfcda4b4499e9fc7c69355070472_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412072eb0bfcda4b4499e9fc7c69355070472_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,965 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120731af08f757c045eab162f3c024351f2d_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120731af08f757c045eab162f3c024351f2d_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,966 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120737f79bd329c0413bb25cc4d133b070d6_4ae54766e0f6f378fecb09a332e653a1 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120737f79bd329c0413bb25cc4d133b070d6_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,968 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412073b513c33c0d14b0a8cd53d67a64bf551_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412073b513c33c0d14b0a8cd53d67a64bf551_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,970 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074364785948734f3dba89c00e478e921f_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074364785948734f3dba89c00e478e921f_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,972 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074e663d145737428bab8e40402bea41f2_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074e663d145737428bab8e40402bea41f2_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,974 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412075ad9655e9cf94647b9c7ff9550f97b6d_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412075ad9655e9cf94647b9c7ff9550f97b6d_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,975 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207615ef77d37504b55987e26a026562308_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207615ef77d37504b55987e26a026562308_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,977 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207732a671b72cd4d25b6cd30e747cd1095_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207732a671b72cd4d25b6cd30e747cd1095_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,978 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207750860b9a9f549f9b5eebc87c3ee1dd3_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207750860b9a9f549f9b5eebc87c3ee1dd3_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,979 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412077dd70d712e9c4f31b18d1799ce2adfc6_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412077dd70d712e9c4f31b18d1799ce2adfc6_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,981 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207b386dcdcb2ae47ba938d569df52b8dc5_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207b386dcdcb2ae47ba938d569df52b8dc5_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,982 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207b92c81191fc6461ea34537f46fab5754_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207b92c81191fc6461ea34537f46fab5754_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,983 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207baee67034f2d41b6bcda6c2059659c9a_4ae54766e0f6f378fecb09a332e653a1 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207baee67034f2d41b6bcda6c2059659c9a_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,984 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207c7412a8622564d32afba1eed0fbcdbe5_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207c7412a8622564d32afba1eed0fbcdbe5_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,986 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207dba90ca99c2b49c5a02e132e17a6ad1a_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207dba90ca99c2b49c5a02e132e17a6ad1a_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,987 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207e8d84528e97e4a1a9578d7e1c82c73ee_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207e8d84528e97e4a1a9578d7e1c82c73ee_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,989 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207ea8aa63949eb44c1b2052d9300653acb_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207ea8aa63949eb44c1b2052d9300653acb_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,991 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207eb3b5acad0594c45a3ac0dc44c15aa41_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207eb3b5acad0594c45a3ac0dc44c15aa41_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,993 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207fcd3fe02d52747a2b41c91d367e4263f_4ae54766e0f6f378fecb09a332e653a1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207fcd3fe02d52747a2b41c91d367e4263f_4ae54766e0f6f378fecb09a332e653a1 2024-12-07T18:20:04,993 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-07T18:20:04,996 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=65, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:04,998 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-07T18:20:05,001 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-07T18:20:05,002 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=65, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:05,002 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-07T18:20:05,002 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733595605002"}]},"ts":"9223372036854775807"} 2024-12-07T18:20:05,004 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-07T18:20:05,004 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4ae54766e0f6f378fecb09a332e653a1, NAME => 'TestAcidGuarantees,,1733595572612.4ae54766e0f6f378fecb09a332e653a1.', STARTKEY => '', ENDKEY => ''}] 2024-12-07T18:20:05,004 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
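Note: the HFileArchiver entries above all follow the same pattern — a store file is moved from the region's data directory to the parallel location under archive/, preserving the table/region/family layout. The following is a minimal sketch of that path mapping, assuming the HDFS root directory used in this run; toArchivePath is a hypothetical helper written only to illustrate the layout visible in the log, not HBase's own implementation.

import org.apache.hadoop.fs.Path;

// Illustrative only: mirrors the data/ -> archive/data/ mapping seen in the log lines above.
public final class ArchivePathSketch {
  // rootDir is an assumption taken from this run's test-data directory.
  static Path toArchivePath(Path rootDir, String table, String region, String family, String file) {
    return new Path(rootDir,
        String.format("archive/data/default/%s/%s/%s/%s", table, region, family, file));
  }

  public static void main(String[] args) {
    Path root = new Path(
        "hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7");
    // Prints the archive location for one of the family C files archived above.
    System.out.println(toArchivePath(root, "TestAcidGuarantees",
        "4ae54766e0f6f378fecb09a332e653a1", "C", "911c48c5509e4b38ab4b9b91e5395114"));
  }
}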
2024-12-07T18:20:05,004 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733595605004"}]},"ts":"9223372036854775807"} 2024-12-07T18:20:05,006 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-07T18:20:05,008 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=65, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:05,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 84 msec 2024-12-07T18:20:05,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-07T18:20:05,027 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-07T18:20:05,040 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=238 (was 239), OpenFileDescriptor=455 (was 455), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=481 (was 373) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7070 (was 7677) 2024-12-07T18:20:05,052 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=238, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=481, ProcessCount=11, AvailableMemoryMB=7070 2024-12-07T18:20:05,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
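Note: at this point the log records that the client's DISABLE (procId 61) and DELETE (procId 65) operations on default:TestAcidGuarantees have completed and the next test, testGetAtomicity, is starting. For orientation, a client-side sequence that produces this pair of master operations would look roughly like the sketch below. It is a hedged illustration using the public HBase 2.x Admin API, not the actual TestAcidGuarantees test code; the configuration and connection handling are assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableAndDeleteSketch {
  public static void main(String[] args) throws Exception {
    // Sketch only: connection setup is assumed; the test framework manages its own mini-cluster.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn); // corresponds to "Operation: DISABLE ... procId: 61 completed"
        }
        admin.deleteTable(tn);    // corresponds to "Operation: DELETE ... procId: 65 completed"
      }
    }
  }
}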
2024-12-07T18:20:05,054 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T18:20:05,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:05,055 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T18:20:05,055 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:05,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 66 2024-12-07T18:20:05,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-12-07T18:20:05,056 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T18:20:05,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742100_1276 (size=963) 2024-12-07T18:20:05,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-12-07T18:20:05,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-12-07T18:20:05,466 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7 2024-12-07T18:20:05,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742101_1277 (size=53) 2024-12-07T18:20:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-12-07T18:20:05,873 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:20:05,873 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 70dba895e74ad497e9ce7e920215ba59, disabling compactions & flushes 2024-12-07T18:20:05,873 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:05,873 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:05,873 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. after waiting 0 ms 2024-12-07T18:20:05,873 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:05,873 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:05,873 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:05,874 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T18:20:05,874 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733595605874"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733595605874"}]},"ts":"1733595605874"} 2024-12-07T18:20:05,875 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-07T18:20:05,875 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T18:20:05,876 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595605875"}]},"ts":"1733595605875"} 2024-12-07T18:20:05,876 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-07T18:20:05,880 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=70dba895e74ad497e9ce7e920215ba59, ASSIGN}] 2024-12-07T18:20:05,881 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=70dba895e74ad497e9ce7e920215ba59, ASSIGN 2024-12-07T18:20:05,881 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=70dba895e74ad497e9ce7e920215ba59, ASSIGN; state=OFFLINE, location=8a7a030b35db,45237,1733595542335; forceNewPlan=false, retain=false 2024-12-07T18:20:06,032 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=70dba895e74ad497e9ce7e920215ba59, regionState=OPENING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:06,033 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; OpenRegionProcedure 70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:20:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-12-07T18:20:06,184 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:06,187 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:06,187 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7285): Opening region: {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:20:06,188 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:06,188 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:20:06,188 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7327): checking encryption for 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:06,188 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7330): checking classloading for 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:06,190 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:06,191 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:20:06,191 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70dba895e74ad497e9ce7e920215ba59 columnFamilyName A 2024-12-07T18:20:06,191 DEBUG [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:06,192 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] regionserver.HStore(327): Store=70dba895e74ad497e9ce7e920215ba59/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:20:06,192 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:06,193 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:20:06,194 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70dba895e74ad497e9ce7e920215ba59 columnFamilyName B 2024-12-07T18:20:06,194 DEBUG [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:06,194 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] regionserver.HStore(327): Store=70dba895e74ad497e9ce7e920215ba59/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:20:06,194 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:06,195 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:20:06,195 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70dba895e74ad497e9ce7e920215ba59 columnFamilyName C 2024-12-07T18:20:06,195 DEBUG [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:06,195 INFO [StoreOpener-70dba895e74ad497e9ce7e920215ba59-1 {}] regionserver.HStore(327): Store=70dba895e74ad497e9ce7e920215ba59/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:20:06,196 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:06,196 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:06,197 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:06,198 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T18:20:06,199 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1085): writing seq id for 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:06,201 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T18:20:06,201 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1102): Opened 70dba895e74ad497e9ce7e920215ba59; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71564228, jitterRate=0.06639009714126587}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T18:20:06,202 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1001): Region open journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:06,203 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., pid=68, masterSystemTime=1733595606184 2024-12-07T18:20:06,204 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:06,204 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:06,204 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=70dba895e74ad497e9ce7e920215ba59, regionState=OPEN, openSeqNum=2, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:06,206 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-07T18:20:06,206 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; OpenRegionProcedure 70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 in 172 msec 2024-12-07T18:20:06,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=66 2024-12-07T18:20:06,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=66, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=70dba895e74ad497e9ce7e920215ba59, ASSIGN in 326 msec 2024-12-07T18:20:06,208 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T18:20:06,208 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595606208"}]},"ts":"1733595606208"} 2024-12-07T18:20:06,209 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-07T18:20:06,212 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T18:20:06,212 DEBUG [master/8a7a030b35db:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region e84b61442b688dc7f09be574fc7d8389 changed from -1.0 to 0.0, refreshing cache 2024-12-07T18:20:06,213 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1580 sec 2024-12-07T18:20:07,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-12-07T18:20:07,160 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 66 completed 2024-12-07T18:20:07,162 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4b01e9bb to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@473dfbd2 2024-12-07T18:20:07,165 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@320146a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:07,166 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:07,168 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46778, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:07,169 DEBUG [Time-limited test {}] ipc.RpcConnection(159): 
Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T18:20:07,170 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59100, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T18:20:07,171 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4becc07d to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63a751b9 2024-12-07T18:20:07,174 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bab3f39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:07,175 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5cc79dd6 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b4256e 2024-12-07T18:20:07,178 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e48016, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:07,179 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3cd6e3ed to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18ed3e4c 2024-12-07T18:20:07,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b2ae977, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:07,183 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x13b0002b to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62de434f 2024-12-07T18:20:07,185 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ed37f32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:07,186 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1703a605 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7575b91 2024-12-07T18:20:07,190 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12e88ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:07,191 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b377948 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7dfc4f36 
2024-12-07T18:20:07,194 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25f2abe2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:07,195 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2a9f805a to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3505ffc0 2024-12-07T18:20:07,199 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@598ef39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:07,200 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b123525 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61f33e78 2024-12-07T18:20:07,203 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23d0f458, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:07,204 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2bd6a663 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@532e5d9f 2024-12-07T18:20:07,209 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77780196, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:07,210 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10be4157 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31dd347a 2024-12-07T18:20:07,213 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69ef766, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:07,216 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:07,216 DEBUG [hconnection-0x309d5812-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:07,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-12-07T18:20:07,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-07T18:20:07,218 INFO 
[RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46792, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:07,218 DEBUG [hconnection-0x58a9cdd1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:07,218 DEBUG [hconnection-0x7673526d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:07,218 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:07,219 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:07,219 DEBUG [hconnection-0x5e68eb28-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:07,219 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46804, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:07,219 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:07,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:07,221 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:07,221 DEBUG [hconnection-0x255042f8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:07,221 DEBUG [hconnection-0x5aea54e7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:07,222 DEBUG [hconnection-0x3c14ba38-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:07,222 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46832, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:07,222 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:07,223 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46846, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:07,223 DEBUG [hconnection-0x23f51866-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:07,223 DEBUG [hconnection-0xfe46222-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
2024-12-07T18:20:07,224 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46856, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:07,224 DEBUG [hconnection-0x6e8ccc93-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:07,225 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46860, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:07,226 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:07,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:07,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:20:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:07,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595667251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595667251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595667252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595667253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595667254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/1a8bb5623016478b81ff99781962646b is 50, key is test_row_1/A:col10/1733595607227/Put/seqid=0 2024-12-07T18:20:07,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742102_1278 (size=9657) 2024-12-07T18:20:07,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/1a8bb5623016478b81ff99781962646b 2024-12-07T18:20:07,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-07T18:20:07,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/33d23cd461454e1da364e0509469a36e is 50, key is test_row_1/B:col10/1733595607227/Put/seqid=0 2024-12-07T18:20:07,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595667356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595667356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595667356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595667358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595667358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742103_1279 (size=9657) 2024-12-07T18:20:07,371 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-07T18:20:07,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:07,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:07,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:07,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:07,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:07,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:07,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-07T18:20:07,525 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-07T18:20:07,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:07,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:07,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:07,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:07,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:07,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:07,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595667559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595667559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595667561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595667562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595667562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,678 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-07T18:20:07,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:07,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:07,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:07,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:07,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:07,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:07,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/33d23cd461454e1da364e0509469a36e 2024-12-07T18:20:07,795 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/2fb3fd47ce8e46f5bccf8d9c7c08639d is 50, key is test_row_1/C:col10/1733595607227/Put/seqid=0 2024-12-07T18:20:07,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742104_1280 (size=9657) 2024-12-07T18:20:07,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/2fb3fd47ce8e46f5bccf8d9c7c08639d 2024-12-07T18:20:07,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/1a8bb5623016478b81ff99781962646b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1a8bb5623016478b81ff99781962646b 2024-12-07T18:20:07,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1a8bb5623016478b81ff99781962646b, entries=100, sequenceid=13, filesize=9.4 K 2024-12-07T18:20:07,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/33d23cd461454e1da364e0509469a36e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/33d23cd461454e1da364e0509469a36e 2024-12-07T18:20:07,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-07T18:20:07,823 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/33d23cd461454e1da364e0509469a36e, entries=100, sequenceid=13, filesize=9.4 K 2024-12-07T18:20:07,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/2fb3fd47ce8e46f5bccf8d9c7c08639d as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2fb3fd47ce8e46f5bccf8d9c7c08639d 2024-12-07T18:20:07,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2fb3fd47ce8e46f5bccf8d9c7c08639d, entries=100, sequenceid=13, filesize=9.4 K 2024-12-07T18:20:07,828 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 70dba895e74ad497e9ce7e920215ba59 in 600ms, sequenceid=13, compaction requested=false 2024-12-07T18:20:07,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:07,831 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-07T18:20:07,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:07,832 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-07T18:20:07,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:07,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:07,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:07,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:07,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:07,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:07,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/61a1cbb1f37a4fa9846185aaa4de049d is 50, key is test_row_0/A:col10/1733595607245/Put/seqid=0 2024-12-07T18:20:07,843 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742105_1281 (size=12001) 2024-12-07T18:20:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:07,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:07,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595667868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595667869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595667869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595667870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595667870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595667972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595667972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595667972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595667972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:07,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:07,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595667973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:08,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595668175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:08,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595668176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:08,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595668176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:08,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595668176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:08,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595668176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:08,244 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/61a1cbb1f37a4fa9846185aaa4de049d 2024-12-07T18:20:08,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/e0ea61c624ab47bc89a14a07c552c0bd is 50, key is test_row_0/B:col10/1733595607245/Put/seqid=0 2024-12-07T18:20:08,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742106_1282 (size=12001) 2024-12-07T18:20:08,259 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/e0ea61c624ab47bc89a14a07c552c0bd 2024-12-07T18:20:08,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/2d7d824dfa4a45f09b084d173d3ad538 is 50, key is test_row_0/C:col10/1733595607245/Put/seqid=0 2024-12-07T18:20:08,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742107_1283 (size=12001) 2024-12-07T18:20:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-07T18:20:08,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:08,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595668478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:08,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:08,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595668478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:08,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:08,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595668479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:08,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:08,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595668479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:08,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:08,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595668480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:08,676 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/2d7d824dfa4a45f09b084d173d3ad538 2024-12-07T18:20:08,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/61a1cbb1f37a4fa9846185aaa4de049d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/61a1cbb1f37a4fa9846185aaa4de049d 2024-12-07T18:20:08,695 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/61a1cbb1f37a4fa9846185aaa4de049d, entries=150, sequenceid=38, filesize=11.7 K 2024-12-07T18:20:08,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/e0ea61c624ab47bc89a14a07c552c0bd as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/e0ea61c624ab47bc89a14a07c552c0bd 2024-12-07T18:20:08,701 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/e0ea61c624ab47bc89a14a07c552c0bd, entries=150, sequenceid=38, filesize=11.7 K 2024-12-07T18:20:08,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/2d7d824dfa4a45f09b084d173d3ad538 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2d7d824dfa4a45f09b084d173d3ad538 2024-12-07T18:20:08,706 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2d7d824dfa4a45f09b084d173d3ad538, entries=150, sequenceid=38, filesize=11.7 K 2024-12-07T18:20:08,706 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 70dba895e74ad497e9ce7e920215ba59 in 874ms, sequenceid=38, compaction requested=false 2024-12-07T18:20:08,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:08,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:08,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-07T18:20:08,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-07T18:20:08,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-07T18:20:08,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4890 sec 2024-12-07T18:20:08,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.4940 sec 2024-12-07T18:20:08,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:08,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-07T18:20:08,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:08,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:08,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:08,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:08,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:08,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:08,988 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/5cf47e1dfecb4a898546b1cd8d266159 is 50, key is test_row_0/A:col10/1733595607867/Put/seqid=0 2024-12-07T18:20:08,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742108_1284 (size=12001) 2024-12-07T18:20:09,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595669035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595669035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595669037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595669038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595669038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595669139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595669139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595669141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595669141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595669141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-07T18:20:09,322 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-07T18:20:09,324 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:09,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-07T18:20:09,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T18:20:09,326 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:09,326 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:09,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:09,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595669342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595669342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595669345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595669345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595669345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/5cf47e1dfecb4a898546b1cd8d266159 2024-12-07T18:20:09,407 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/ecfe9740d843440b87037f42be098100 is 50, key is test_row_0/B:col10/1733595607867/Put/seqid=0 2024-12-07T18:20:09,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742109_1285 (size=12001) 2024-12-07T18:20:09,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T18:20:09,477 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-07T18:20:09,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:09,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:09,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:09,478 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:09,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:09,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T18:20:09,630 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-07T18:20:09,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:09,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:09,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:09,631 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:09,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:09,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595669646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595669646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595669647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595669649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595669650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,782 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-07T18:20:09,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:09,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:09,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:09,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:09,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:09,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:09,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/ecfe9740d843440b87037f42be098100 2024-12-07T18:20:09,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/a4abeeb75aec41caa8da36da362e973b is 50, key is test_row_0/C:col10/1733595607867/Put/seqid=0 2024-12-07T18:20:09,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742110_1286 (size=12001) 2024-12-07T18:20:09,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/a4abeeb75aec41caa8da36da362e973b 2024-12-07T18:20:09,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/5cf47e1dfecb4a898546b1cd8d266159 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5cf47e1dfecb4a898546b1cd8d266159 2024-12-07T18:20:09,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5cf47e1dfecb4a898546b1cd8d266159, entries=150, sequenceid=51, filesize=11.7 K 2024-12-07T18:20:09,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/ecfe9740d843440b87037f42be098100 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ecfe9740d843440b87037f42be098100 2024-12-07T18:20:09,881 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ecfe9740d843440b87037f42be098100, entries=150, sequenceid=51, filesize=11.7 K 2024-12-07T18:20:09,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/a4abeeb75aec41caa8da36da362e973b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/a4abeeb75aec41caa8da36da362e973b 2024-12-07T18:20:09,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/a4abeeb75aec41caa8da36da362e973b, entries=150, sequenceid=51, filesize=11.7 K 2024-12-07T18:20:09,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 70dba895e74ad497e9ce7e920215ba59 in 906ms, sequenceid=51, compaction requested=true 2024-12-07T18:20:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:09,889 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:09,889 DEBUG 
[RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:09,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:09,890 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:09,890 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/A is initiating minor compaction (all files) 2024-12-07T18:20:09,890 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/A in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:09,890 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1a8bb5623016478b81ff99781962646b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/61a1cbb1f37a4fa9846185aaa4de049d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5cf47e1dfecb4a898546b1cd8d266159] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=32.9 K 2024-12-07T18:20:09,890 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:09,890 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/B is initiating minor compaction (all files) 2024-12-07T18:20:09,890 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/B in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:09,891 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/33d23cd461454e1da364e0509469a36e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/e0ea61c624ab47bc89a14a07c552c0bd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ecfe9740d843440b87037f42be098100] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=32.9 K 2024-12-07T18:20:09,891 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a8bb5623016478b81ff99781962646b, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733595607227 2024-12-07T18:20:09,891 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 33d23cd461454e1da364e0509469a36e, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733595607227 2024-12-07T18:20:09,891 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61a1cbb1f37a4fa9846185aaa4de049d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733595607245 2024-12-07T18:20:09,891 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting e0ea61c624ab47bc89a14a07c552c0bd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733595607245 2024-12-07T18:20:09,892 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5cf47e1dfecb4a898546b1cd8d266159, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595607866 2024-12-07T18:20:09,892 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ecfe9740d843440b87037f42be098100, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595607866 2024-12-07T18:20:09,901 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#A#compaction#234 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:09,901 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/dde3cba7d3aa4042828308c2156a4225 is 50, key is test_row_0/A:col10/1733595607867/Put/seqid=0 2024-12-07T18:20:09,902 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#B#compaction#235 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:09,903 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/c2390ceb6a4b49118dd9fb2e390616cc is 50, key is test_row_0/B:col10/1733595607867/Put/seqid=0 2024-12-07T18:20:09,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742112_1288 (size=12104) 2024-12-07T18:20:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T18:20:09,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742111_1287 (size=12104) 2024-12-07T18:20:09,936 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:09,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-07T18:20:09,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:09,937 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-07T18:20:09,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:09,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:09,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:09,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:09,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:09,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:09,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/1c8eb272b88d4cc8b1b93717a5d1b2d2 is 50, key is test_row_0/A:col10/1733595609037/Put/seqid=0 
2024-12-07T18:20:09,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742113_1289 (size=12001) 2024-12-07T18:20:10,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:10,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:10,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595670155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595670156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595670157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595670157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595670158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595670260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595670261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595670261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595670261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,292 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T18:20:10,332 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/dde3cba7d3aa4042828308c2156a4225 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/dde3cba7d3aa4042828308c2156a4225 2024-12-07T18:20:10,336 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/c2390ceb6a4b49118dd9fb2e390616cc as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/c2390ceb6a4b49118dd9fb2e390616cc 2024-12-07T18:20:10,339 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/A of 70dba895e74ad497e9ce7e920215ba59 into dde3cba7d3aa4042828308c2156a4225(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:10,339 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:10,339 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/A, priority=13, startTime=1733595609889; duration=0sec 2024-12-07T18:20:10,339 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:10,339 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:A 2024-12-07T18:20:10,339 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:10,341 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:10,341 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/C is initiating minor compaction (all files) 2024-12-07T18:20:10,341 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/C in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:10,341 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2fb3fd47ce8e46f5bccf8d9c7c08639d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2d7d824dfa4a45f09b084d173d3ad538, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/a4abeeb75aec41caa8da36da362e973b] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=32.9 K 2024-12-07T18:20:10,341 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/B of 70dba895e74ad497e9ce7e920215ba59 into c2390ceb6a4b49118dd9fb2e390616cc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:10,341 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:10,341 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/B, priority=13, startTime=1733595609889; duration=0sec 2024-12-07T18:20:10,341 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:10,341 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:B 2024-12-07T18:20:10,341 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fb3fd47ce8e46f5bccf8d9c7c08639d, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733595607227 2024-12-07T18:20:10,342 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d7d824dfa4a45f09b084d173d3ad538, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733595607245 2024-12-07T18:20:10,342 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4abeeb75aec41caa8da36da362e973b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595607866 2024-12-07T18:20:10,354 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/1c8eb272b88d4cc8b1b93717a5d1b2d2 2024-12-07T18:20:10,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/0c75e8a4b05040908ea4c9b59b5c4f2a is 50, key is test_row_0/B:col10/1733595609037/Put/seqid=0 2024-12-07T18:20:10,368 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#C#compaction#237 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:10,368 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/a74aa682a1ac450ebc0cc000343316f5 is 50, key is test_row_0/C:col10/1733595607867/Put/seqid=0 2024-12-07T18:20:10,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742114_1290 (size=12001) 2024-12-07T18:20:10,387 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/0c75e8a4b05040908ea4c9b59b5c4f2a 2024-12-07T18:20:10,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742115_1291 (size=12104) 2024-12-07T18:20:10,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/b48632588e334f25adaea572c680e832 is 50, key is test_row_0/C:col10/1733595609037/Put/seqid=0 2024-12-07T18:20:10,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T18:20:10,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742116_1292 (size=12001) 2024-12-07T18:20:10,441 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/b48632588e334f25adaea572c680e832 2024-12-07T18:20:10,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/1c8eb272b88d4cc8b1b93717a5d1b2d2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1c8eb272b88d4cc8b1b93717a5d1b2d2 2024-12-07T18:20:10,456 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1c8eb272b88d4cc8b1b93717a5d1b2d2, entries=150, sequenceid=75, filesize=11.7 K 2024-12-07T18:20:10,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/0c75e8a4b05040908ea4c9b59b5c4f2a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/0c75e8a4b05040908ea4c9b59b5c4f2a 2024-12-07T18:20:10,464 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/0c75e8a4b05040908ea4c9b59b5c4f2a, entries=150, sequenceid=75, filesize=11.7 K 2024-12-07T18:20:10,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595670463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595670464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595670463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/b48632588e334f25adaea572c680e832 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/b48632588e334f25adaea572c680e832 2024-12-07T18:20:10,466 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595670464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,471 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/b48632588e334f25adaea572c680e832, entries=150, sequenceid=75, filesize=11.7 K 2024-12-07T18:20:10,472 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 70dba895e74ad497e9ce7e920215ba59 in 535ms, sequenceid=75, compaction requested=false 2024-12-07T18:20:10,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:10,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:10,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-07T18:20:10,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-07T18:20:10,475 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-07T18:20:10,475 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1470 sec 2024-12-07T18:20:10,477 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.1510 sec 2024-12-07T18:20:10,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:10,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-07T18:20:10,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:10,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:10,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:10,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:10,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:10,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:10,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/7fd3387e312d48f9a80f23d00a193946 is 50, key is test_row_0/A:col10/1733595610768/Put/seqid=0 2024-12-07T18:20:10,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742117_1293 (size=12001) 2024-12-07T18:20:10,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595670793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595670794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595670794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595670795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,820 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/a74aa682a1ac450ebc0cc000343316f5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/a74aa682a1ac450ebc0cc000343316f5 2024-12-07T18:20:10,825 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/C of 70dba895e74ad497e9ce7e920215ba59 into a74aa682a1ac450ebc0cc000343316f5(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:10,825 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:10,825 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/C, priority=13, startTime=1733595609889; duration=0sec 2024-12-07T18:20:10,825 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:10,825 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:C 2024-12-07T18:20:10,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595670897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595670898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:10,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595670898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:10,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595670899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595671100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595671101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595671101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595671101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595671164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/7fd3387e312d48f9a80f23d00a193946 2024-12-07T18:20:11,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/cc75f6c5ddad4533a224afb9b8ad43b1 is 50, key is test_row_0/B:col10/1733595610768/Put/seqid=0 2024-12-07T18:20:11,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742118_1294 (size=12001) 2024-12-07T18:20:11,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595671403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595671404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595671406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595671406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T18:20:11,429 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-07T18:20:11,430 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-07T18:20:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-07T18:20:11,432 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:11,433 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:11,433 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:11,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-07T18:20:11,584 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-07T18:20:11,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:11,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:11,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:11,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:11,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:11,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:11,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/cc75f6c5ddad4533a224afb9b8ad43b1 2024-12-07T18:20:11,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/c0337cd6b40a4582a94365ed325f15db is 50, key is test_row_0/C:col10/1733595610768/Put/seqid=0 2024-12-07T18:20:11,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742119_1295 (size=12001) 2024-12-07T18:20:11,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-07T18:20:11,737 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-07T18:20:11,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:11,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:11,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:11,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:11,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:11,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:11,890 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-07T18:20:11,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:11,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:11,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:11,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:11,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:11,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:11,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595671909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595671909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595671910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:11,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:11,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595671912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:12,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/c0337cd6b40a4582a94365ed325f15db 2024-12-07T18:20:12,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/7fd3387e312d48f9a80f23d00a193946 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7fd3387e312d48f9a80f23d00a193946 2024-12-07T18:20:12,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7fd3387e312d48f9a80f23d00a193946, entries=150, sequenceid=91, filesize=11.7 K 2024-12-07T18:20:12,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/cc75f6c5ddad4533a224afb9b8ad43b1 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/cc75f6c5ddad4533a224afb9b8ad43b1 2024-12-07T18:20:12,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/cc75f6c5ddad4533a224afb9b8ad43b1, entries=150, sequenceid=91, filesize=11.7 K 2024-12-07T18:20:12,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/c0337cd6b40a4582a94365ed325f15db as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c0337cd6b40a4582a94365ed325f15db 2024-12-07T18:20:12,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c0337cd6b40a4582a94365ed325f15db, entries=150, sequenceid=91, filesize=11.7 K 2024-12-07T18:20:12,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 70dba895e74ad497e9ce7e920215ba59 in 1261ms, sequenceid=91, compaction requested=true 2024-12-07T18:20:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:12,029 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:12,029 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:12,032 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:12,032 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:12,032 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/B is initiating minor compaction (all files) 2024-12-07T18:20:12,032 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/A is initiating minor compaction (all files) 2024-12-07T18:20:12,033 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/A in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:12,033 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/B in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:12,033 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/dde3cba7d3aa4042828308c2156a4225, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1c8eb272b88d4cc8b1b93717a5d1b2d2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7fd3387e312d48f9a80f23d00a193946] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=35.3 K 2024-12-07T18:20:12,033 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/c2390ceb6a4b49118dd9fb2e390616cc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/0c75e8a4b05040908ea4c9b59b5c4f2a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/cc75f6c5ddad4533a224afb9b8ad43b1] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=35.3 K 2024-12-07T18:20:12,033 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting dde3cba7d3aa4042828308c2156a4225, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595607866 2024-12-07T18:20:12,033 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting c2390ceb6a4b49118dd9fb2e390616cc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595607866 2024-12-07T18:20:12,033 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c75e8a4b05040908ea4c9b59b5c4f2a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733595609034 2024-12-07T18:20:12,033 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c8eb272b88d4cc8b1b93717a5d1b2d2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733595609034 2024-12-07T18:20:12,034 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting cc75f6c5ddad4533a224afb9b8ad43b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733595610151 2024-12-07T18:20:12,034 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fd3387e312d48f9a80f23d00a193946, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733595610151 
2024-12-07T18:20:12,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-07T18:20:12,042 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#A#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:12,043 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/0f56649669a34c8da8fa42d4ef2f9421 is 50, key is test_row_0/A:col10/1733595610768/Put/seqid=0 2024-12-07T18:20:12,044 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:12,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-07T18:20:12,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:12,045 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#B#compaction#244 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:12,045 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:20:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:12,045 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/4525ee287e174411820e04eb62145c9c is 50, key is test_row_0/B:col10/1733595610768/Put/seqid=0 2024-12-07T18:20:12,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/a673fee586e64a51bd545fa643eaba1d is 50, key is test_row_0/A:col10/1733595610792/Put/seqid=0 2024-12-07T18:20:12,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742121_1297 (size=12207) 2024-12-07T18:20:12,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742122_1298 (size=12001) 2024-12-07T18:20:12,058 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/a673fee586e64a51bd545fa643eaba1d 2024-12-07T18:20:12,059 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/4525ee287e174411820e04eb62145c9c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/4525ee287e174411820e04eb62145c9c 2024-12-07T18:20:12,065 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/B of 70dba895e74ad497e9ce7e920215ba59 into 4525ee287e174411820e04eb62145c9c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:12,065 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:12,065 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/B, priority=13, startTime=1733595612029; duration=0sec 2024-12-07T18:20:12,065 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:12,065 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:B 2024-12-07T18:20:12,065 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:12,066 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:12,066 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/C is initiating minor compaction (all files) 2024-12-07T18:20:12,067 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/C in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:12,067 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/a74aa682a1ac450ebc0cc000343316f5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/b48632588e334f25adaea572c680e832, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c0337cd6b40a4582a94365ed325f15db] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=35.3 K 2024-12-07T18:20:12,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742120_1296 (size=12207) 2024-12-07T18:20:12,067 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a74aa682a1ac450ebc0cc000343316f5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595607866 2024-12-07T18:20:12,067 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting b48632588e334f25adaea572c680e832, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733595609034 2024-12-07T18:20:12,068 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting c0337cd6b40a4582a94365ed325f15db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=91, earliestPutTs=1733595610151 2024-12-07T18:20:12,074 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/0f56649669a34c8da8fa42d4ef2f9421 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/0f56649669a34c8da8fa42d4ef2f9421 2024-12-07T18:20:12,081 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/A of 70dba895e74ad497e9ce7e920215ba59 into 0f56649669a34c8da8fa42d4ef2f9421(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:12,081 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:12,082 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/A, priority=13, startTime=1733595612029; duration=0sec 2024-12-07T18:20:12,082 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:12,082 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:A 2024-12-07T18:20:12,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/d0475d48e9844f70977650acb9e292d3 is 50, key is test_row_0/B:col10/1733595610792/Put/seqid=0 2024-12-07T18:20:12,094 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#C#compaction#247 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:12,094 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/b101a36f4702442abb779fcdce755430 is 50, key is test_row_0/C:col10/1733595610768/Put/seqid=0 2024-12-07T18:20:12,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742124_1300 (size=12207) 2024-12-07T18:20:12,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742123_1299 (size=12001) 2024-12-07T18:20:12,121 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/b101a36f4702442abb779fcdce755430 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/b101a36f4702442abb779fcdce755430 2024-12-07T18:20:12,126 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/C of 70dba895e74ad497e9ce7e920215ba59 into b101a36f4702442abb779fcdce755430(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:12,126 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:12,126 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/C, priority=13, startTime=1733595612029; duration=0sec 2024-12-07T18:20:12,126 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:12,126 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:C 2024-12-07T18:20:12,520 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/d0475d48e9844f70977650acb9e292d3 2024-12-07T18:20:12,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/951bc439556a4aaa89fdc9da70a3d9fa is 50, key is test_row_0/C:col10/1733595610792/Put/seqid=0 2024-12-07T18:20:12,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=73 2024-12-07T18:20:12,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742125_1301 (size=12001) 2024-12-07T18:20:12,558 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/951bc439556a4aaa89fdc9da70a3d9fa 2024-12-07T18:20:12,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/a673fee586e64a51bd545fa643eaba1d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a673fee586e64a51bd545fa643eaba1d 2024-12-07T18:20:12,568 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a673fee586e64a51bd545fa643eaba1d, entries=150, sequenceid=115, filesize=11.7 K 2024-12-07T18:20:12,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/d0475d48e9844f70977650acb9e292d3 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d0475d48e9844f70977650acb9e292d3 2024-12-07T18:20:12,576 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d0475d48e9844f70977650acb9e292d3, entries=150, sequenceid=115, filesize=11.7 K 2024-12-07T18:20:12,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/951bc439556a4aaa89fdc9da70a3d9fa as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/951bc439556a4aaa89fdc9da70a3d9fa 2024-12-07T18:20:12,582 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/951bc439556a4aaa89fdc9da70a3d9fa, entries=150, sequenceid=115, filesize=11.7 K 2024-12-07T18:20:12,583 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] 
regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 70dba895e74ad497e9ce7e920215ba59 in 538ms, sequenceid=115, compaction requested=false 2024-12-07T18:20:12,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:12,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:12,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-07T18:20:12,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-07T18:20:12,586 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-07T18:20:12,586 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1520 sec 2024-12-07T18:20:12,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.1570 sec 2024-12-07T18:20:12,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:12,922 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:20:12,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:12,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:12,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:12,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:12,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:12,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:12,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/c70a1091f7ce48aa90e3cdc41dc4ae8c is 50, key is test_row_1/A:col10/1733595612920/Put/seqid=0 2024-12-07T18:20:12,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742126_1302 (size=9707) 2024-12-07T18:20:12,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595672945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:12,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595672947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:12,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595672947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:12,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595672948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595673050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595673051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595673051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595673051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595673175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,177 DEBUG [Thread-1263 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., hostname=8a7a030b35db,45237,1733595542335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:20:13,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595673253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595673254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595673255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595673255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/c70a1091f7ce48aa90e3cdc41dc4ae8c 2024-12-07T18:20:13,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/704c7872beba45e79a5f04288aba6805 is 50, key is test_row_1/B:col10/1733595612920/Put/seqid=0 2024-12-07T18:20:13,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742127_1303 (size=9707) 2024-12-07T18:20:13,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/704c7872beba45e79a5f04288aba6805 2024-12-07T18:20:13,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/cf4c6db6c7a44569b9bc2e78a2e2d1b4 is 50, key is test_row_1/C:col10/1733595612920/Put/seqid=0 2024-12-07T18:20:13,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742128_1304 (size=9707) 2024-12-07T18:20:13,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-07T18:20:13,536 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-07T18:20:13,537 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 
{}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:13,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-07T18:20:13,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-07T18:20:13,540 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:13,541 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:13,541 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:13,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595673557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595673557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595673558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:13,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595673558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-07T18:20:13,693 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-07T18:20:13,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:13,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:13,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:13,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:13,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:13,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/cf4c6db6c7a44569b9bc2e78a2e2d1b4 2024-12-07T18:20:13,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/c70a1091f7ce48aa90e3cdc41dc4ae8c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/c70a1091f7ce48aa90e3cdc41dc4ae8c 2024-12-07T18:20:13,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/c70a1091f7ce48aa90e3cdc41dc4ae8c, entries=100, sequenceid=129, filesize=9.5 K 2024-12-07T18:20:13,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/704c7872beba45e79a5f04288aba6805 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/704c7872beba45e79a5f04288aba6805 2024-12-07T18:20:13,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/704c7872beba45e79a5f04288aba6805, entries=100, sequenceid=129, filesize=9.5 K 2024-12-07T18:20:13,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/cf4c6db6c7a44569b9bc2e78a2e2d1b4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cf4c6db6c7a44569b9bc2e78a2e2d1b4 2024-12-07T18:20:13,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cf4c6db6c7a44569b9bc2e78a2e2d1b4, entries=100, sequenceid=129, filesize=9.5 K 2024-12-07T18:20:13,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 70dba895e74ad497e9ce7e920215ba59 in 917ms, sequenceid=129, compaction requested=true 2024-12-07T18:20:13,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:13,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
70dba895e74ad497e9ce7e920215ba59:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:13,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:13,839 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:13,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:13,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:13,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:13,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:13,839 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:13,840 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33915 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:13,840 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33915 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:13,840 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/B is initiating minor compaction (all files) 2024-12-07T18:20:13,840 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/A is initiating minor compaction (all files) 2024-12-07T18:20:13,841 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/B in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:13,841 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/A in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:13,841 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/4525ee287e174411820e04eb62145c9c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d0475d48e9844f70977650acb9e292d3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/704c7872beba45e79a5f04288aba6805] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=33.1 K 2024-12-07T18:20:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-07T18:20:13,841 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/0f56649669a34c8da8fa42d4ef2f9421, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a673fee586e64a51bd545fa643eaba1d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/c70a1091f7ce48aa90e3cdc41dc4ae8c] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=33.1 K 2024-12-07T18:20:13,841 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 4525ee287e174411820e04eb62145c9c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733595610151 2024-12-07T18:20:13,842 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f56649669a34c8da8fa42d4ef2f9421, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733595610151 2024-12-07T18:20:13,842 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d0475d48e9844f70977650acb9e292d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733595610792 2024-12-07T18:20:13,842 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting a673fee586e64a51bd545fa643eaba1d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733595610792 2024-12-07T18:20:13,842 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 704c7872beba45e79a5f04288aba6805, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733595612920 2024-12-07T18:20:13,843 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting c70a1091f7ce48aa90e3cdc41dc4ae8c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733595612920 2024-12-07T18:20:13,845 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:13,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-07T18:20:13,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:13,846 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-07T18:20:13,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:13,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:13,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:13,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:13,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:13,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:13,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/e65cd6cdfab54408852fbd01f30cad6a is 50, key is test_row_0/A:col10/1733595612940/Put/seqid=0 2024-12-07T18:20:13,865 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#B#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:13,865 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/1a217e2c855a4d13b1535d732d0a5223 is 50, key is test_row_0/B:col10/1733595610792/Put/seqid=0 2024-12-07T18:20:13,870 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#A#compaction#254 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:13,871 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/e6daba5454d6497bafec8c0ca0cd9271 is 50, key is test_row_0/A:col10/1733595610792/Put/seqid=0 2024-12-07T18:20:13,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742129_1305 (size=12151) 2024-12-07T18:20:13,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742130_1306 (size=12359) 2024-12-07T18:20:13,890 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/1a217e2c855a4d13b1535d732d0a5223 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/1a217e2c855a4d13b1535d732d0a5223 2024-12-07T18:20:13,894 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/B of 70dba895e74ad497e9ce7e920215ba59 into 1a217e2c855a4d13b1535d732d0a5223(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:13,894 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:13,894 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/B, priority=13, startTime=1733595613839; duration=0sec 2024-12-07T18:20:13,894 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:13,894 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:B 2024-12-07T18:20:13,894 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:13,896 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33915 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:13,896 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/C is initiating minor compaction (all files) 2024-12-07T18:20:13,896 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/C in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:13,896 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/b101a36f4702442abb779fcdce755430, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/951bc439556a4aaa89fdc9da70a3d9fa, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cf4c6db6c7a44569b9bc2e78a2e2d1b4] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=33.1 K 2024-12-07T18:20:13,897 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting b101a36f4702442abb779fcdce755430, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733595610151 2024-12-07T18:20:13,897 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 951bc439556a4aaa89fdc9da70a3d9fa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733595610792 2024-12-07T18:20:13,897 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting cf4c6db6c7a44569b9bc2e78a2e2d1b4, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733595612920 2024-12-07T18:20:13,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742131_1307 (size=12359) 2024-12-07T18:20:13,916 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/e6daba5454d6497bafec8c0ca0cd9271 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/e6daba5454d6497bafec8c0ca0cd9271 2024-12-07T18:20:13,919 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#C#compaction#255 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:13,920 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/7d625ef4bbb040e0861b9a7dbd221d8e is 50, key is test_row_0/C:col10/1733595610792/Put/seqid=0 2024-12-07T18:20:13,925 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/A of 70dba895e74ad497e9ce7e920215ba59 into e6daba5454d6497bafec8c0ca0cd9271(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
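
For reference, the compactions selected above (ExploringCompactionPolicy picking three store files per family and rewriting them into one) are queued automatically by the region server after flushes; an equivalent compaction can also be requested explicitly through the HBase Admin API. The sketch below is only an illustration and is not part of the test code that produced this log; it assumes an hbase-site.xml reachable on the classpath and uses the table and family names seen in the log (TestAcidGuarantees, family C). The class name RequestCompaction is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    // Assumption: cluster settings come from hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the region servers to queue a compaction of the C family of TestAcidGuarantees,
      // comparable to the system-selected minor compaction of store C shown in the log above.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
      // admin.majorCompact(...) would instead force a major compaction of all store files.
    }
  }
}
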
2024-12-07T18:20:13,925 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:13,925 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/A, priority=13, startTime=1733595613839; duration=0sec 2024-12-07T18:20:13,925 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:13,925 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:A 2024-12-07T18:20:13,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742132_1308 (size=12359) 2024-12-07T18:20:13,940 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/7d625ef4bbb040e0861b9a7dbd221d8e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/7d625ef4bbb040e0861b9a7dbd221d8e 2024-12-07T18:20:13,945 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/C of 70dba895e74ad497e9ce7e920215ba59 into 7d625ef4bbb040e0861b9a7dbd221d8e(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:13,945 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:13,945 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/C, priority=13, startTime=1733595613839; duration=0sec 2024-12-07T18:20:13,946 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:13,946 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:C 2024-12-07T18:20:14,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:14,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:14,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595674067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595674068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595674069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595674069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-07T18:20:14,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595674170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595674170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595674171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595674172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,278 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/e65cd6cdfab54408852fbd01f30cad6a 2024-12-07T18:20:14,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/9d831adf30b14549aac84b9d2359c373 is 50, key is test_row_0/B:col10/1733595612940/Put/seqid=0 2024-12-07T18:20:14,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742133_1309 (size=12151) 2024-12-07T18:20:14,291 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/9d831adf30b14549aac84b9d2359c373 2024-12-07T18:20:14,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/759583fffe8c4eb2ad14c91ba9a5e291 is 50, key is test_row_0/C:col10/1733595612940/Put/seqid=0 2024-12-07T18:20:14,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742134_1310 (size=12151) 2024-12-07T18:20:14,303 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/759583fffe8c4eb2ad14c91ba9a5e291 2024-12-07T18:20:14,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/e65cd6cdfab54408852fbd01f30cad6a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/e65cd6cdfab54408852fbd01f30cad6a 2024-12-07T18:20:14,312 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/e65cd6cdfab54408852fbd01f30cad6a, entries=150, sequenceid=154, filesize=11.9 K 2024-12-07T18:20:14,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/9d831adf30b14549aac84b9d2359c373 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/9d831adf30b14549aac84b9d2359c373 2024-12-07T18:20:14,323 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/9d831adf30b14549aac84b9d2359c373, entries=150, sequenceid=154, filesize=11.9 K 2024-12-07T18:20:14,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/759583fffe8c4eb2ad14c91ba9a5e291 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/759583fffe8c4eb2ad14c91ba9a5e291 2024-12-07T18:20:14,329 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/759583fffe8c4eb2ad14c91ba9a5e291, entries=150, sequenceid=154, filesize=11.9 K 2024-12-07T18:20:14,331 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 70dba895e74ad497e9ce7e920215ba59 in 485ms, sequenceid=154, compaction requested=false 2024-12-07T18:20:14,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:14,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
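
The flush executed above under pid=76 (a FlushRegionProcedure spawned by FlushTableProcedure pid=75) is the server-side half of a table flush; such a procedure is typically driven by a client-side request through the Admin API. A minimal sketch follows, again assuming configuration on the classpath; the call shown is a generic example of triggering a table flush, not the actual test harness code behind this log, and the class name FlushTable is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    // Assumption: cluster settings come from hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the master to flush every region of the table. On the server this surfaces as a
      // FlushTableProcedure with one FlushRegionProcedure per region, like pid=75/pid=76 above,
      // and each region writes its memstore out to new store files as logged by DefaultStoreFlusher.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
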
2024-12-07T18:20:14,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-07T18:20:14,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-07T18:20:14,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-07T18:20:14,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 792 msec 2024-12-07T18:20:14,336 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 798 msec 2024-12-07T18:20:14,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:14,376 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-07T18:20:14,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:14,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:14,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:14,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:14,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:14,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:14,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/4ce8b9c7c85d4b568ffef0feb4add935 is 50, key is test_row_0/A:col10/1733595614068/Put/seqid=0 2024-12-07T18:20:14,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742135_1311 (size=12151) 2024-12-07T18:20:14,392 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/4ce8b9c7c85d4b568ffef0feb4add935 2024-12-07T18:20:14,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595674396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/4a9eb4de357a4a61b81bf25ce8f942bd is 50, key is test_row_0/B:col10/1733595614068/Put/seqid=0 2024-12-07T18:20:14,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595674398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595674398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595674399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742136_1312 (size=12151) 2024-12-07T18:20:14,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/4a9eb4de357a4a61b81bf25ce8f942bd 2024-12-07T18:20:14,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/d2e283ae63fc443baffd0ebbd66cb436 is 50, key is test_row_0/C:col10/1733595614068/Put/seqid=0 2024-12-07T18:20:14,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742137_1313 (size=12151) 2024-12-07T18:20:14,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/d2e283ae63fc443baffd0ebbd66cb436 2024-12-07T18:20:14,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/4ce8b9c7c85d4b568ffef0feb4add935 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/4ce8b9c7c85d4b568ffef0feb4add935 2024-12-07T18:20:14,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/4ce8b9c7c85d4b568ffef0feb4add935, 
entries=150, sequenceid=171, filesize=11.9 K 2024-12-07T18:20:14,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/4a9eb4de357a4a61b81bf25ce8f942bd as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/4a9eb4de357a4a61b81bf25ce8f942bd 2024-12-07T18:20:14,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/4a9eb4de357a4a61b81bf25ce8f942bd, entries=150, sequenceid=171, filesize=11.9 K 2024-12-07T18:20:14,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/d2e283ae63fc443baffd0ebbd66cb436 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/d2e283ae63fc443baffd0ebbd66cb436 2024-12-07T18:20:14,438 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/d2e283ae63fc443baffd0ebbd66cb436, entries=150, sequenceid=171, filesize=11.9 K 2024-12-07T18:20:14,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 70dba895e74ad497e9ce7e920215ba59 in 63ms, sequenceid=171, compaction requested=true 2024-12-07T18:20:14,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:14,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:14,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:14,440 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:14,440 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:14,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:14,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:14,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:14,440 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:14,441 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:14,441 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/B is initiating minor compaction (all files) 2024-12-07T18:20:14,441 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/B in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:14,441 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/1a217e2c855a4d13b1535d732d0a5223, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/9d831adf30b14549aac84b9d2359c373, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/4a9eb4de357a4a61b81bf25ce8f942bd] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=35.8 K 2024-12-07T18:20:14,441 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:14,441 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/A is initiating minor compaction (all files) 2024-12-07T18:20:14,441 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/A in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:14,441 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/e6daba5454d6497bafec8c0ca0cd9271, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/e65cd6cdfab54408852fbd01f30cad6a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/4ce8b9c7c85d4b568ffef0feb4add935] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=35.8 K 2024-12-07T18:20:14,442 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a217e2c855a4d13b1535d732d0a5223, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733595610792 2024-12-07T18:20:14,442 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6daba5454d6497bafec8c0ca0cd9271, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733595610792 2024-12-07T18:20:14,442 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting e65cd6cdfab54408852fbd01f30cad6a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733595612940 2024-12-07T18:20:14,442 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d831adf30b14549aac84b9d2359c373, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733595612940 2024-12-07T18:20:14,442 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a9eb4de357a4a61b81bf25ce8f942bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733595614068 2024-12-07T18:20:14,442 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ce8b9c7c85d4b568ffef0feb4add935, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733595614068 2024-12-07T18:20:14,451 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#B#compaction#261 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:14,451 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#A#compaction#262 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:14,452 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/90ff71d59e5846b6acd16ec59aab3b1b is 50, key is test_row_0/A:col10/1733595614068/Put/seqid=0 2024-12-07T18:20:14,453 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/bae62eb3d6ea4198bb2aca169869ae11 is 50, key is test_row_0/B:col10/1733595614068/Put/seqid=0 2024-12-07T18:20:14,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742138_1314 (size=12561) 2024-12-07T18:20:14,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742139_1315 (size=12561) 2024-12-07T18:20:14,464 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/90ff71d59e5846b6acd16ec59aab3b1b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/90ff71d59e5846b6acd16ec59aab3b1b 2024-12-07T18:20:14,465 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/bae62eb3d6ea4198bb2aca169869ae11 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/bae62eb3d6ea4198bb2aca169869ae11 2024-12-07T18:20:14,471 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/A of 70dba895e74ad497e9ce7e920215ba59 into 90ff71d59e5846b6acd16ec59aab3b1b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:14,471 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:14,471 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/A, priority=13, startTime=1733595614439; duration=0sec 2024-12-07T18:20:14,471 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/B of 70dba895e74ad497e9ce7e920215ba59 into bae62eb3d6ea4198bb2aca169869ae11(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:14,471 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:14,471 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/B, priority=13, startTime=1733595614439; duration=0sec 2024-12-07T18:20:14,471 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:14,471 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:A 2024-12-07T18:20:14,471 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:14,471 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:14,471 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:B 2024-12-07T18:20:14,472 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:14,472 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/C is initiating minor compaction (all files) 2024-12-07T18:20:14,472 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/C in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:14,472 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/7d625ef4bbb040e0861b9a7dbd221d8e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/759583fffe8c4eb2ad14c91ba9a5e291, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/d2e283ae63fc443baffd0ebbd66cb436] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=35.8 K 2024-12-07T18:20:14,473 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d625ef4bbb040e0861b9a7dbd221d8e, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733595610792 2024-12-07T18:20:14,473 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 759583fffe8c4eb2ad14c91ba9a5e291, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733595612940 2024-12-07T18:20:14,473 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2e283ae63fc443baffd0ebbd66cb436, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733595614068 2024-12-07T18:20:14,483 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#C#compaction#263 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:14,484 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/85a6cc301390439e8d9be4df4268d60c is 50, key is test_row_0/C:col10/1733595614068/Put/seqid=0 2024-12-07T18:20:14,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742140_1316 (size=12561) 2024-12-07T18:20:14,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:14,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:20:14,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:14,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:14,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:14,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:14,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:14,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:14,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/729ddd3c4613454cadee0d53861d29b5 is 50, key is test_row_0/A:col10/1733595614398/Put/seqid=0 2024-12-07T18:20:14,513 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/85a6cc301390439e8d9be4df4268d60c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/85a6cc301390439e8d9be4df4268d60c 2024-12-07T18:20:14,520 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/C of 70dba895e74ad497e9ce7e920215ba59 into 85a6cc301390439e8d9be4df4268d60c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
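The "Exploring compaction algorithm has selected 3 files of size 36661 ... with 1 in ratio" entry above reflects the ratio test that HBase's ExploringCompactionPolicy applies when picking minor-compaction candidates: a candidate set qualifies when every file is no larger than the combined size of the other files times the configured compaction ratio. A minimal sketch of that rule follows, assuming the default hbase.hstore.compaction.ratio of 1.2; the class and method names are illustrative, not the actual HBase source.

    // Illustrative sketch of the "in ratio" test used when selecting store files
    // for a minor compaction. The sizes approximate the three C-store files above
    // (~12.1 K + 11.9 K + 11.9 K, ~36 K total).
    public final class CompactionRatioSketch {
        static boolean filesInRatio(long[] fileSizes, double ratio) {
            long total = 0;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                // Reject the set if any single file dominates the others.
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            long[] cStoreFiles = {12_390, 12_186, 12_186};
            System.out.println(filesInRatio(cStoreFiles, 1.2)); // prints true
        }
    }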
2024-12-07T18:20:14,520 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:14,520 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/C, priority=13, startTime=1733595614440; duration=0sec 2024-12-07T18:20:14,520 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:14,521 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:C 2024-12-07T18:20:14,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595674513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595674521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595674521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595674521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742141_1317 (size=14541) 2024-12-07T18:20:14,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/729ddd3c4613454cadee0d53861d29b5 2024-12-07T18:20:14,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/61c6e9aa8f6549cebd74e9892c407634 is 50, key is test_row_0/B:col10/1733595614398/Put/seqid=0 2024-12-07T18:20:14,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742142_1318 (size=12151) 2024-12-07T18:20:14,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595674622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595674626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595674626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595674626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-07T18:20:14,642 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-07T18:20:14,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:14,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-07T18:20:14,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-07T18:20:14,646 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:14,647 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:14,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:14,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-07T18:20:14,799 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-07T18:20:14,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:14,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:14,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:14,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:14,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:14,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:14,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595674825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595674828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595674829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:14,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595674829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-07T18:20:14,952 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:14,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-07T18:20:14,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:14,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:14,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:14,953 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
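The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") come from the write path rejecting mutations while the region's memstore exceeds its blocking size, which in HBase is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier (default 4); the 512 K limit suggests this test run configures a much smaller flush size than the production default. A simplified sketch of that check follows, with illustrative names rather than the real HRegion.checkResources signature.

    import java.io.IOException;

    // Simplified sketch of the memstore back-pressure check. HBase raises
    // RegionTooBusyException (an IOException subclass); clients typically retry
    // until the in-flight flush brings the memstore back under the limit, which
    // matches the increasing callIds on the same connections in the log above.
    public final class MemStoreLimitSketch {
        static void checkResources(long memStoreSizeBytes, long blockingLimitBytes,
                                   String regionName) throws IOException {
            if (memStoreSizeBytes > blockingLimitBytes) {
                throw new IOException("Over memstore limit=" + blockingLimitBytes
                    + ", regionName=" + regionName);
            }
        }

        public static void main(String[] args) {
            try {
                // 512 K limit as in the log; a write arriving while ~600 K is buffered is rejected.
                checkResources(600 * 1024, 512 * 1024, "70dba895e74ad497e9ce7e920215ba59");
            } catch (IOException e) {
                System.out.println(e.getMessage());
            }
        }
    }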
2024-12-07T18:20:14,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:14,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:14,956 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/61c6e9aa8f6549cebd74e9892c407634 2024-12-07T18:20:14,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/af4a62d374c0493db3e223795beb561f is 50, key is test_row_0/C:col10/1733595614398/Put/seqid=0 2024-12-07T18:20:14,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742143_1319 (size=12151) 2024-12-07T18:20:15,105 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-07T18:20:15,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:15,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:15,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:15,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
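The pid=78 failures above follow a consistent pattern: the master dispatches a FlushRegionCallable to the region server, the region refuses to start another flush while one is already in progress ("NOT flushing ... as already flushing"), the callable reports an IOException, and the master re-dispatches the procedure a moment later. A small sketch of that region-server-side decision follows, using illustrative types rather than the real FlushRegionCallable API.

    import java.io.IOException;

    // Illustrative sketch: the callable fails (rather than silently succeeding)
    // while the region is mid-flush, so the remote procedure is attempted again
    // until a flush can actually be started and its result observed.
    public final class FlushRetrySketch {
        interface Region {
            boolean isFlushing();
            void flush() throws IOException;
            String encodedName();
        }

        static void runFlushCallable(Region region) throws IOException {
            if (region.isFlushing()) {
                // Mirrors the "Unable to complete flush" errors in the log above.
                throw new IOException("Unable to complete flush " + region.encodedName());
            }
            region.flush();
        }

        public static void main(String[] args) {
            Region busyRegion = new Region() {
                public boolean isFlushing() { return true; }
                public void flush() { }
                public String encodedName() { return "70dba895e74ad497e9ce7e920215ba59"; }
            };
            try {
                runFlushCallable(busyRegion);
            } catch (IOException e) {
                System.out.println(e.getMessage()); // the master would schedule another attempt
            }
        }
    }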
2024-12-07T18:20:15,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:15,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:15,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595675128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595675130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595675131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595675132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-07T18:20:15,258 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-07T18:20:15,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:15,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:15,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:15,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:15,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:15,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/af4a62d374c0493db3e223795beb561f 2024-12-07T18:20:15,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/729ddd3c4613454cadee0d53861d29b5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/729ddd3c4613454cadee0d53861d29b5 2024-12-07T18:20:15,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/729ddd3c4613454cadee0d53861d29b5, entries=200, sequenceid=196, filesize=14.2 K 2024-12-07T18:20:15,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/61c6e9aa8f6549cebd74e9892c407634 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/61c6e9aa8f6549cebd74e9892c407634 2024-12-07T18:20:15,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/61c6e9aa8f6549cebd74e9892c407634, entries=150, sequenceid=196, filesize=11.9 K 2024-12-07T18:20:15,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/af4a62d374c0493db3e223795beb561f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af4a62d374c0493db3e223795beb561f 2024-12-07T18:20:15,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af4a62d374c0493db3e223795beb561f, entries=150, sequenceid=196, filesize=11.9 K 2024-12-07T18:20:15,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 70dba895e74ad497e9ce7e920215ba59 in 894ms, sequenceid=196, compaction requested=false 2024-12-07T18:20:15,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:15,411 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-07T18:20:15,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:15,413 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-07T18:20:15,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:15,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:15,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:15,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:15,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:15,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:15,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/ffaf8652e96b4065ac3e3491d75a03a8 is 50, key is test_row_0/A:col10/1733595614515/Put/seqid=0 2024-12-07T18:20:15,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742144_1320 (size=12151) 2024-12-07T18:20:15,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
as already flushing 2024-12-07T18:20:15,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:15,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595675656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595675682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595675682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595675682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-07T18:20:15,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595675787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595675787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595675787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:15,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595675790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:15,833 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/ffaf8652e96b4065ac3e3491d75a03a8 2024-12-07T18:20:15,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/ad821d71b37742fbb9d9924618342cac is 50, key is test_row_0/B:col10/1733595614515/Put/seqid=0 2024-12-07T18:20:15,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742145_1321 (size=12151) 2024-12-07T18:20:15,852 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/ad821d71b37742fbb9d9924618342cac 2024-12-07T18:20:15,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/689cf8b505f34858a084c3bb7faecc6f is 50, key is test_row_0/C:col10/1733595614515/Put/seqid=0 2024-12-07T18:20:15,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742146_1322 (size=12151) 2024-12-07T18:20:15,865 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/689cf8b505f34858a084c3bb7faecc6f 2024-12-07T18:20:15,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/ffaf8652e96b4065ac3e3491d75a03a8 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/ffaf8652e96b4065ac3e3491d75a03a8 2024-12-07T18:20:15,875 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/ffaf8652e96b4065ac3e3491d75a03a8, entries=150, sequenceid=210, filesize=11.9 K 2024-12-07T18:20:15,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/ad821d71b37742fbb9d9924618342cac as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ad821d71b37742fbb9d9924618342cac 2024-12-07T18:20:15,885 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ad821d71b37742fbb9d9924618342cac, entries=150, sequenceid=210, filesize=11.9 K 2024-12-07T18:20:15,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/689cf8b505f34858a084c3bb7faecc6f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/689cf8b505f34858a084c3bb7faecc6f 2024-12-07T18:20:15,890 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/689cf8b505f34858a084c3bb7faecc6f, entries=150, sequenceid=210, filesize=11.9 K 2024-12-07T18:20:15,891 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 70dba895e74ad497e9ce7e920215ba59 in 479ms, sequenceid=210, compaction requested=true 2024-12-07T18:20:15,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:15,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:15,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-07T18:20:15,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-07T18:20:15,894 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-07T18:20:15,894 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2460 sec 2024-12-07T18:20:15,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.2510 sec 2024-12-07T18:20:15,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-07T18:20:15,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:15,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:15,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:15,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:15,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:15,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:15,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:16,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/7b37034e4f924d56a34e8bd7d768e0c3 is 50, key is test_row_0/A:col10/1733595615651/Put/seqid=0 2024-12-07T18:20:16,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595676002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595676003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595676003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595676004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742147_1323 (size=12151) 2024-12-07T18:20:16,017 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/7b37034e4f924d56a34e8bd7d768e0c3 2024-12-07T18:20:16,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/607391c719f243cd8790e4f9bbce4fd1 is 50, key is test_row_0/B:col10/1733595615651/Put/seqid=0 2024-12-07T18:20:16,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742148_1324 (size=12151) 2024-12-07T18:20:16,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595676107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595676107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595676107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595676108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595676309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595676309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595676311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595676311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,438 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/607391c719f243cd8790e4f9bbce4fd1 2024-12-07T18:20:16,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/af7acae620e84eb5a421e83a273bd573 is 50, key is test_row_0/C:col10/1733595615651/Put/seqid=0 2024-12-07T18:20:16,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742149_1325 (size=12151) 2024-12-07T18:20:16,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595676612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595676613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595676614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:16,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595676616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-07T18:20:16,750 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-07T18:20:16,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:16,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-07T18:20:16,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-07T18:20:16,754 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:16,755 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:16,755 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:16,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/af7acae620e84eb5a421e83a273bd573 2024-12-07T18:20:16,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-07T18:20:16,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/7b37034e4f924d56a34e8bd7d768e0c3 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7b37034e4f924d56a34e8bd7d768e0c3 2024-12-07T18:20:16,862 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7b37034e4f924d56a34e8bd7d768e0c3, entries=150, sequenceid=236, filesize=11.9 K 2024-12-07T18:20:16,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/607391c719f243cd8790e4f9bbce4fd1 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/607391c719f243cd8790e4f9bbce4fd1 2024-12-07T18:20:16,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/607391c719f243cd8790e4f9bbce4fd1, entries=150, sequenceid=236, filesize=11.9 K 2024-12-07T18:20:16,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/af7acae620e84eb5a421e83a273bd573 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af7acae620e84eb5a421e83a273bd573 2024-12-07T18:20:16,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af7acae620e84eb5a421e83a273bd573, entries=150, sequenceid=236, filesize=11.9 K 2024-12-07T18:20:16,871 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 70dba895e74ad497e9ce7e920215ba59 in 879ms, sequenceid=236, compaction requested=true 2024-12-07T18:20:16,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:16,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:16,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:16,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:16,872 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:20:16,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:16,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:16,872 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:20:16,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:16,873 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:20:16,873 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/B is initiating minor compaction (all files) 2024-12-07T18:20:16,873 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/B in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:16,874 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/bae62eb3d6ea4198bb2aca169869ae11, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/61c6e9aa8f6549cebd74e9892c407634, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ad821d71b37742fbb9d9924618342cac, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/607391c719f243cd8790e4f9bbce4fd1] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=47.9 K 2024-12-07T18:20:16,874 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51404 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:20:16,874 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/A is initiating minor compaction (all files) 2024-12-07T18:20:16,874 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/A in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:16,874 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/90ff71d59e5846b6acd16ec59aab3b1b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/729ddd3c4613454cadee0d53861d29b5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/ffaf8652e96b4065ac3e3491d75a03a8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7b37034e4f924d56a34e8bd7d768e0c3] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=50.2 K 2024-12-07T18:20:16,875 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting bae62eb3d6ea4198bb2aca169869ae11, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733595614068 2024-12-07T18:20:16,875 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90ff71d59e5846b6acd16ec59aab3b1b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733595614068 2024-12-07T18:20:16,875 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 61c6e9aa8f6549cebd74e9892c407634, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733595614389 2024-12-07T18:20:16,876 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 729ddd3c4613454cadee0d53861d29b5, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733595614389 2024-12-07T18:20:16,876 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ad821d71b37742fbb9d9924618342cac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733595614514 2024-12-07T18:20:16,876 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffaf8652e96b4065ac3e3491d75a03a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733595614514 2024-12-07T18:20:16,876 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 607391c719f243cd8790e4f9bbce4fd1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733595615651 2024-12-07T18:20:16,877 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b37034e4f924d56a34e8bd7d768e0c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733595615651 2024-12-07T18:20:16,890 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#B#compaction#273 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:16,891 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/55780857107f42cd84cd7e248282e12d is 50, key is test_row_0/B:col10/1733595615651/Put/seqid=0 2024-12-07T18:20:16,902 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#A#compaction#274 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:16,902 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/c715d2f690444b1199865e909b921713 is 50, key is test_row_0/A:col10/1733595615651/Put/seqid=0 2024-12-07T18:20:16,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742150_1326 (size=12697) 2024-12-07T18:20:16,906 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:16,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-07T18:20:16,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:16,907 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:20:16,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:16,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:16,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:16,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:16,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:16,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:16,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/a83bfb90b7d44c7d96c8226edb16eefc is 50, key is test_row_0/A:col10/1733595615994/Put/seqid=0 2024-12-07T18:20:16,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742151_1327 (size=12697) 2024-12-07T18:20:16,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742152_1328 (size=12151) 2024-12-07T18:20:17,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-07T18:20:17,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:17,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:17,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595677138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595677139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595677141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595677141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46836 deadline: 1733595677184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,188 DEBUG [Thread-1263 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., hostname=8a7a030b35db,45237,1733595542335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:20:17,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595677242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595677243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595677245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595677245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,310 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/55780857107f42cd84cd7e248282e12d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/55780857107f42cd84cd7e248282e12d 2024-12-07T18:20:17,315 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/B of 70dba895e74ad497e9ce7e920215ba59 into 55780857107f42cd84cd7e248282e12d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:17,315 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:17,315 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/B, priority=12, startTime=1733595616872; duration=0sec 2024-12-07T18:20:17,315 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:17,315 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:B 2024-12-07T18:20:17,315 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:20:17,316 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:20:17,316 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/C is initiating minor compaction (all files) 2024-12-07T18:20:17,316 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/C in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:17,316 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/85a6cc301390439e8d9be4df4268d60c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af4a62d374c0493db3e223795beb561f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/689cf8b505f34858a084c3bb7faecc6f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af7acae620e84eb5a421e83a273bd573] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=47.9 K 2024-12-07T18:20:17,317 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 85a6cc301390439e8d9be4df4268d60c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733595614068 2024-12-07T18:20:17,317 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting af4a62d374c0493db3e223795beb561f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733595614389 2024-12-07T18:20:17,317 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 689cf8b505f34858a084c3bb7faecc6f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=210, earliestPutTs=1733595614514 2024-12-07T18:20:17,318 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting af7acae620e84eb5a421e83a273bd573, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733595615651 2024-12-07T18:20:17,321 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/c715d2f690444b1199865e909b921713 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/c715d2f690444b1199865e909b921713 2024-12-07T18:20:17,322 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/a83bfb90b7d44c7d96c8226edb16eefc 2024-12-07T18:20:17,326 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/A of 70dba895e74ad497e9ce7e920215ba59 into c715d2f690444b1199865e909b921713(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:17,326 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:17,326 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/A, priority=12, startTime=1733595616872; duration=0sec 2024-12-07T18:20:17,326 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:17,326 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:A 2024-12-07T18:20:17,329 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#C#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:17,329 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/34e544f1a04a43589b07ec174db6d33e is 50, key is test_row_0/C:col10/1733595615651/Put/seqid=0 2024-12-07T18:20:17,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/7916d06a546e439894cfd022d00ef214 is 50, key is test_row_0/B:col10/1733595615994/Put/seqid=0 2024-12-07T18:20:17,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742153_1329 (size=12697) 2024-12-07T18:20:17,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742154_1330 (size=12151) 2024-12-07T18:20:17,348 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/7916d06a546e439894cfd022d00ef214 2024-12-07T18:20:17,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/946fcbde61f8413fa791db8db39c6f8c is 50, key is test_row_0/C:col10/1733595615994/Put/seqid=0 2024-12-07T18:20:17,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-07T18:20:17,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742155_1331 (size=12151) 2024-12-07T18:20:17,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595677446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595677446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595677449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595677449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,747 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/34e544f1a04a43589b07ec174db6d33e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/34e544f1a04a43589b07ec174db6d33e 2024-12-07T18:20:17,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,752 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/C of 70dba895e74ad497e9ce7e920215ba59 into 34e544f1a04a43589b07ec174db6d33e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
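Note on the repeated RegionTooBusyException entries above: the region server is rejecting puts because the region's memstore has grown past its blocking limit ("Over memstore limit=512.0 K") while the flushes and compactions logged here catch up. The sketch below is not taken from the test source; it only illustrates, under assumptions, how a client might bound and retry such writes against this table. The class name BusyRegionPutExample, the retry count, and the backoff values are illustrative; the configuration keys hbase.client.retries.number and hbase.client.pause are standard HBase client settings, and depending on client version the busy signal may surface wrapped in a retries-exhausted exception rather than directly.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The client already retries internally; these knobs bound how long it keeps
    // trying before a RegionTooBusyException is surfaced to the caller.
    conf.setInt("hbase.client.retries.number", 10);
    conf.setLong("hbase.client.pause", 100); // ms between internal retries

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same row/family/qualifier shape as the writes in this log.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          break;                          // write accepted once the memstore drains
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;      // give up after a few explicit retries
          Thread.sleep(200L << attempt);  // crude exponential backoff between attempts
        }
      }
    }
  }
}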
2024-12-07T18:20:17,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595677750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,752 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:17,752 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/C, priority=12, startTime=1733595616872; duration=0sec 2024-12-07T18:20:17,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595677750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,752 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:17,753 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:C 2024-12-07T18:20:17,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595677752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:17,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595677753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:17,759 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/946fcbde61f8413fa791db8db39c6f8c 2024-12-07T18:20:17,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/a83bfb90b7d44c7d96c8226edb16eefc as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a83bfb90b7d44c7d96c8226edb16eefc 2024-12-07T18:20:17,773 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a83bfb90b7d44c7d96c8226edb16eefc, entries=150, sequenceid=247, filesize=11.9 K 2024-12-07T18:20:17,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/7916d06a546e439894cfd022d00ef214 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7916d06a546e439894cfd022d00ef214 2024-12-07T18:20:17,778 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7916d06a546e439894cfd022d00ef214, entries=150, sequenceid=247, filesize=11.9 K 2024-12-07T18:20:17,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/946fcbde61f8413fa791db8db39c6f8c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/946fcbde61f8413fa791db8db39c6f8c 2024-12-07T18:20:17,784 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/946fcbde61f8413fa791db8db39c6f8c, entries=150, sequenceid=247, filesize=11.9 K 2024-12-07T18:20:17,785 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 70dba895e74ad497e9ce7e920215ba59 in 878ms, sequenceid=247, compaction requested=false 2024-12-07T18:20:17,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:17,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:17,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-07T18:20:17,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-07T18:20:17,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-07T18:20:17,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0310 sec 2024-12-07T18:20:17,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.0370 sec 2024-12-07T18:20:17,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-07T18:20:17,857 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-07T18:20:17,858 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:17,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-07T18:20:17,860 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:17,861 INFO 
[PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:17,861 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:17,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T18:20:17,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T18:20:18,014 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-07T18:20:18,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:18,015 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-07T18:20:18,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:18,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:18,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:18,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:18,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:18,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:18,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/13d7e9c734094882890f7905e9cadd16 is 50, key is test_row_0/A:col10/1733595617140/Put/seqid=0 2024-12-07T18:20:18,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742156_1332 (size=12301) 
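The FlushTableProcedure / FlushRegionProcedure entries above (pid=79, 81/82) follow an admin-initiated flush of TestAcidGuarantees ("Operation: FLUSH" via HBaseAdmin). A minimal sketch of issuing that flush from a client is shown below; the class name FlushTableExample is illustrative and this is an assumed reconstruction of the trigger, not code from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; on the master this shows up
      // as a FlushTableProcedure with one FlushRegionProcedure subprocedure per region,
      // matching the pid=81 / pid=82 entries in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}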
2024-12-07T18:20:18,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T18:20:18,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:18,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:18,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595678261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595678261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595678262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595678263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595678365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595678365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595678365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595678366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,424 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/13d7e9c734094882890f7905e9cadd16 2024-12-07T18:20:18,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/3903fdf127974fa89ce0fa4a138a166c is 50, key is test_row_0/B:col10/1733595617140/Put/seqid=0 2024-12-07T18:20:18,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742157_1333 (size=12301) 2024-12-07T18:20:18,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T18:20:18,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595678567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595678568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595678568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595678570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,835 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/3903fdf127974fa89ce0fa4a138a166c 2024-12-07T18:20:18,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/c2ab4aa4874f4134a0fee9fae993522d is 50, key is test_row_0/C:col10/1733595617140/Put/seqid=0 2024-12-07T18:20:18,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742158_1334 (size=12301) 2024-12-07T18:20:18,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595678869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595678870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595678870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:18,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595678874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:18,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T18:20:19,247 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/c2ab4aa4874f4134a0fee9fae993522d 2024-12-07T18:20:19,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/13d7e9c734094882890f7905e9cadd16 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/13d7e9c734094882890f7905e9cadd16 2024-12-07T18:20:19,256 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/13d7e9c734094882890f7905e9cadd16, entries=150, sequenceid=276, filesize=12.0 K 2024-12-07T18:20:19,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/3903fdf127974fa89ce0fa4a138a166c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/3903fdf127974fa89ce0fa4a138a166c 2024-12-07T18:20:19,261 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/3903fdf127974fa89ce0fa4a138a166c, entries=150, sequenceid=276, filesize=12.0 K 2024-12-07T18:20:19,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/c2ab4aa4874f4134a0fee9fae993522d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c2ab4aa4874f4134a0fee9fae993522d 2024-12-07T18:20:19,265 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c2ab4aa4874f4134a0fee9fae993522d, entries=150, sequenceid=276, filesize=12.0 K 2024-12-07T18:20:19,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.StoreScanner(992): StoreScanner already has the close lock. There is no need to updateReaders 2024-12-07T18:20:19,266 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 70dba895e74ad497e9ce7e920215ba59 in 1252ms, sequenceid=276, compaction requested=true 2024-12-07T18:20:19,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:19,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:19,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-07T18:20:19,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-07T18:20:19,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-07T18:20:19,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4060 sec 2024-12-07T18:20:19,270 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.4110 sec 2024-12-07T18:20:19,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:19,376 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:20:19,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:19,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:19,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:19,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:19,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:19,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:19,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/6d528144760a4335bc7d3176adff329c is 50, key is test_row_0/A:col10/1733595619376/Put/seqid=0 2024-12-07T18:20:19,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742159_1335 (size=14741) 2024-12-07T18:20:19,386 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/6d528144760a4335bc7d3176adff329c 2024-12-07T18:20:19,394 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/1b362d7ad63a44008b364ce774589249 is 50, key is test_row_0/B:col10/1733595619376/Put/seqid=0 2024-12-07T18:20:19,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742160_1336 (size=12301) 2024-12-07T18:20:19,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595679396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595679397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595679397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595679398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595679500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595679502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595679502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595679502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595679702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595679705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595679705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:19,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595679706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:19,800 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/1b362d7ad63a44008b364ce774589249 2024-12-07T18:20:19,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/7470fad689ce4dffbab0fc81a1e89a03 is 50, key is test_row_0/C:col10/1733595619376/Put/seqid=0 2024-12-07T18:20:19,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742161_1337 (size=12301) 2024-12-07T18:20:19,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T18:20:19,965 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-07T18:20:19,966 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:19,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-07T18:20:19,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-07T18:20:19,968 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:19,969 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:19,969 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-07T18:20:20,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595680006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595680009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595680010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595680010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-07T18:20:20,120 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-07T18:20:20,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:20,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:20,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:20,121 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:20,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:20,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:20,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/7470fad689ce4dffbab0fc81a1e89a03 2024-12-07T18:20:20,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/6d528144760a4335bc7d3176adff329c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/6d528144760a4335bc7d3176adff329c 2024-12-07T18:20:20,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/6d528144760a4335bc7d3176adff329c, entries=200, sequenceid=289, filesize=14.4 K 2024-12-07T18:20:20,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/1b362d7ad63a44008b364ce774589249 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/1b362d7ad63a44008b364ce774589249 2024-12-07T18:20:20,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/1b362d7ad63a44008b364ce774589249, entries=150, sequenceid=289, filesize=12.0 K 2024-12-07T18:20:20,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/7470fad689ce4dffbab0fc81a1e89a03 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/7470fad689ce4dffbab0fc81a1e89a03 2024-12-07T18:20:20,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/7470fad689ce4dffbab0fc81a1e89a03, entries=150, sequenceid=289, filesize=12.0 K 2024-12-07T18:20:20,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 70dba895e74ad497e9ce7e920215ba59 in 855ms, sequenceid=289, compaction requested=true 2024-12-07T18:20:20,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:20,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:20,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:20,232 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:20:20,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:20,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:20,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:20,232 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:20:20,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:20,233 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51890 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:20:20,233 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/A is initiating minor compaction (all files) 2024-12-07T18:20:20,234 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:20:20,234 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/A in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:20,234 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/B is initiating minor compaction (all files) 2024-12-07T18:20:20,234 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/B in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:20,234 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/c715d2f690444b1199865e909b921713, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a83bfb90b7d44c7d96c8226edb16eefc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/13d7e9c734094882890f7905e9cadd16, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/6d528144760a4335bc7d3176adff329c] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=50.7 K 2024-12-07T18:20:20,234 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/55780857107f42cd84cd7e248282e12d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7916d06a546e439894cfd022d00ef214, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/3903fdf127974fa89ce0fa4a138a166c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/1b362d7ad63a44008b364ce774589249] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=48.3 K 2024-12-07T18:20:20,234 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 55780857107f42cd84cd7e248282e12d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733595615651 2024-12-07T18:20:20,234 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting c715d2f690444b1199865e909b921713, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733595615651 2024-12-07T18:20:20,234 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7916d06a546e439894cfd022d00ef214, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1733595615994 2024-12-07T18:20:20,235 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting a83bfb90b7d44c7d96c8226edb16eefc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1733595615994 2024-12-07T18:20:20,235 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 3903fdf127974fa89ce0fa4a138a166c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733595617135 2024-12-07T18:20:20,235 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
13d7e9c734094882890f7905e9cadd16, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733595617135 2024-12-07T18:20:20,235 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b362d7ad63a44008b364ce774589249, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733595618259 2024-12-07T18:20:20,236 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d528144760a4335bc7d3176adff329c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733595618259 2024-12-07T18:20:20,246 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#B#compaction#285 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:20,247 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/c0481bb10cf849a0bbeefe1aef2a75cd is 50, key is test_row_0/B:col10/1733595619376/Put/seqid=0 2024-12-07T18:20:20,247 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#A#compaction#286 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:20,247 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/b1c13826341e4651aa0de206d7aaeec5 is 50, key is test_row_0/A:col10/1733595619376/Put/seqid=0 2024-12-07T18:20:20,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742163_1339 (size=12983) 2024-12-07T18:20:20,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-07T18:20:20,273 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-07T18:20:20,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:20,274 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:20:20,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:20,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:20,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:20,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:20,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:20,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:20,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742162_1338 (size=12983) 2024-12-07T18:20:20,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/0f3c78d314254fb1a0df8ec5d6aca5e2 is 50, key is test_row_0/A:col10/1733595619396/Put/seqid=0 2024-12-07T18:20:20,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742164_1340 (size=12301) 2024-12-07T18:20:20,287 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/0f3c78d314254fb1a0df8ec5d6aca5e2 2024-12-07T18:20:20,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/d34784bca9a149038fbb44ac2c9fd4ef is 50, key is test_row_0/B:col10/1733595619396/Put/seqid=0 2024-12-07T18:20:20,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742165_1341 (size=12301) 2024-12-07T18:20:20,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
as already flushing 2024-12-07T18:20:20,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:20,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595680521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595680523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595680524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595680524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-07T18:20:20,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595680625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595680627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595680628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595680628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,671 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/b1c13826341e4651aa0de206d7aaeec5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/b1c13826341e4651aa0de206d7aaeec5 2024-12-07T18:20:20,678 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/A of 70dba895e74ad497e9ce7e920215ba59 into b1c13826341e4651aa0de206d7aaeec5(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
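For context, a minimal client-side sketch of the write path that produces the Mutate calls being rejected above with RegionTooBusyException. This is illustrative only and not taken from the test source; it assumes the standard HBase 2.x Java client API, and the explicit retry loop is an assumption (the stock client also retries such failures internally and may surface them wrapped in a RetriesExhaustedException).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    // Table, row and column names follow the log; the cell value is a placeholder.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);              // may be rejected while the region's memstore is over its limit
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);     // back off and let the in-flight flush drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}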
2024-12-07T18:20:20,678 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:20,678 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/A, priority=12, startTime=1733595620231; duration=0sec 2024-12-07T18:20:20,678 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:20,678 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:A 2024-12-07T18:20:20,678 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:20:20,681 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:20:20,681 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/C is initiating minor compaction (all files) 2024-12-07T18:20:20,681 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/C in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:20,681 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/34e544f1a04a43589b07ec174db6d33e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/946fcbde61f8413fa791db8db39c6f8c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c2ab4aa4874f4134a0fee9fae993522d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/7470fad689ce4dffbab0fc81a1e89a03] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=48.3 K 2024-12-07T18:20:20,682 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34e544f1a04a43589b07ec174db6d33e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733595615651 2024-12-07T18:20:20,682 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/c0481bb10cf849a0bbeefe1aef2a75cd as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/c0481bb10cf849a0bbeefe1aef2a75cd 2024-12-07T18:20:20,682 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 946fcbde61f8413fa791db8db39c6f8c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1733595615994 2024-12-07T18:20:20,683 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2ab4aa4874f4134a0fee9fae993522d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733595617135 2024-12-07T18:20:20,683 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7470fad689ce4dffbab0fc81a1e89a03, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733595618259 2024-12-07T18:20:20,686 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/B of 70dba895e74ad497e9ce7e920215ba59 into c0481bb10cf849a0bbeefe1aef2a75cd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:20,686 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:20,686 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/B, priority=12, startTime=1733595620232; duration=0sec 2024-12-07T18:20:20,686 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:20,686 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:B 2024-12-07T18:20:20,692 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#C#compaction#289 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:20,693 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/ebdbe5f431d349beb63314eca96ba9b9 is 50, key is test_row_0/C:col10/1733595619376/Put/seqid=0 2024-12-07T18:20:20,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742166_1342 (size=12983) 2024-12-07T18:20:20,701 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/ebdbe5f431d349beb63314eca96ba9b9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/ebdbe5f431d349beb63314eca96ba9b9 2024-12-07T18:20:20,702 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/d34784bca9a149038fbb44ac2c9fd4ef 2024-12-07T18:20:20,708 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/C of 70dba895e74ad497e9ce7e920215ba59 into ebdbe5f431d349beb63314eca96ba9b9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:20,708 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:20,708 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/C, priority=12, startTime=1733595620232; duration=0sec 2024-12-07T18:20:20,708 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:20,708 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:C 2024-12-07T18:20:20,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/219931b028344ed5ba645b5032909f06 is 50, key is test_row_0/C:col10/1733595619396/Put/seqid=0 2024-12-07T18:20:20,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742167_1343 (size=12301) 2024-12-07T18:20:20,720 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/219931b028344ed5ba645b5032909f06 2024-12-07T18:20:20,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/0f3c78d314254fb1a0df8ec5d6aca5e2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/0f3c78d314254fb1a0df8ec5d6aca5e2 2024-12-07T18:20:20,732 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/0f3c78d314254fb1a0df8ec5d6aca5e2, entries=150, sequenceid=312, filesize=12.0 K 2024-12-07T18:20:20,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/d34784bca9a149038fbb44ac2c9fd4ef as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d34784bca9a149038fbb44ac2c9fd4ef 2024-12-07T18:20:20,738 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d34784bca9a149038fbb44ac2c9fd4ef, entries=150, sequenceid=312, filesize=12.0 K 2024-12-07T18:20:20,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/219931b028344ed5ba645b5032909f06 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/219931b028344ed5ba645b5032909f06 2024-12-07T18:20:20,744 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/219931b028344ed5ba645b5032909f06, entries=150, sequenceid=312, filesize=12.0 K 2024-12-07T18:20:20,745 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 70dba895e74ad497e9ce7e920215ba59 in 471ms, sequenceid=312, compaction requested=false 2024-12-07T18:20:20,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:20,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
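The "Over memstore limit=512.0 K" warnings above come from the region blocking threshold, which is the per-region flush size multiplied by the block multiplier. The sketch below only illustrates that relationship; the flush size shown is an assumption chosen so that the product matches the 512 KB limit in this log, and the test's actual configuration is not visible here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = 128 * 1024;   // hbase.hregion.memstore.flush.size (assumed value)
    int blockMultiplier = 4;       // hbase.hregion.memstore.block.multiplier (default is 4)
    conf.setLong("hbase.hregion.memstore.flush.size", flushSize);
    conf.setInt("hbase.hregion.memstore.block.multiplier", blockMultiplier);
    // checkResources() rejects writes with RegionTooBusyException once the region's
    // memstore exceeds flushSize * blockMultiplier; with the assumed values that is
    // 512 KB, matching the limit reported in the warnings above.
    System.out.println("blocking limit = " + (flushSize * blockMultiplier) + " bytes");
  }
}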
2024-12-07T18:20:20,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-07T18:20:20,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-07T18:20:20,750 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-07T18:20:20,750 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 779 msec 2024-12-07T18:20:20,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 785 msec 2024-12-07T18:20:20,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:20,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-07T18:20:20,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:20,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:20,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:20,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:20,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:20,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:20,837 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/5ff602de4a014fac95f94bf6f4700aa6 is 50, key is test_row_0/A:col10/1733595620510/Put/seqid=0 2024-12-07T18:20:20,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742168_1344 (size=12301) 2024-12-07T18:20:20,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595680850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595680851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595680852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595680852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595680954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595680955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595680955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:20,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:20,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595680956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-07T18:20:21,071 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-07T18:20:21,072 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:21,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-07T18:20:21,074 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:21,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-07T18:20:21,075 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:21,075 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:21,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595681156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595681158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595681158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595681159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-07T18:20:21,226 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-07T18:20:21,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:21,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:21,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:21,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:21,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:21,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:21,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/5ff602de4a014fac95f94bf6f4700aa6 2024-12-07T18:20:21,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/b529c82d12dd44298e87b580c9910964 is 50, key is test_row_0/B:col10/1733595620510/Put/seqid=0 2024-12-07T18:20:21,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742169_1345 (size=12301) 2024-12-07T18:20:21,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/b529c82d12dd44298e87b580c9910964 2024-12-07T18:20:21,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/5d7eef0cb98b47c0bbe537f3ef0ef156 is 50, key is test_row_0/C:col10/1733595620510/Put/seqid=0 2024-12-07T18:20:21,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742170_1346 (size=12301) 2024-12-07T18:20:21,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-07T18:20:21,380 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-07T18:20:21,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:21,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:21,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:21,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:21,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:21,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:21,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595681460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595681461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595681461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595681461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,533 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-07T18:20:21,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:21,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:21,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:21,533 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:21,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:21,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:21,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-07T18:20:21,685 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-07T18:20:21,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:21,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:21,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:21,686 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:21,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:21,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:21,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/5d7eef0cb98b47c0bbe537f3ef0ef156 2024-12-07T18:20:21,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/5ff602de4a014fac95f94bf6f4700aa6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5ff602de4a014fac95f94bf6f4700aa6 2024-12-07T18:20:21,715 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5ff602de4a014fac95f94bf6f4700aa6, entries=150, sequenceid=331, filesize=12.0 K 2024-12-07T18:20:21,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/b529c82d12dd44298e87b580c9910964 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b529c82d12dd44298e87b580c9910964 2024-12-07T18:20:21,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b529c82d12dd44298e87b580c9910964, entries=150, sequenceid=331, filesize=12.0 K 2024-12-07T18:20:21,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/5d7eef0cb98b47c0bbe537f3ef0ef156 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/5d7eef0cb98b47c0bbe537f3ef0ef156 2024-12-07T18:20:21,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/5d7eef0cb98b47c0bbe537f3ef0ef156, entries=150, sequenceid=331, filesize=12.0 K 2024-12-07T18:20:21,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 70dba895e74ad497e9ce7e920215ba59 in 896ms, sequenceid=331, compaction requested=true 2024-12-07T18:20:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:21,727 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:21,727 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:21,728 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:21,728 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/A is initiating minor compaction (all files) 2024-12-07T18:20:21,728 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/A in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:21,728 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/b1c13826341e4651aa0de206d7aaeec5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/0f3c78d314254fb1a0df8ec5d6aca5e2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5ff602de4a014fac95f94bf6f4700aa6] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=36.7 K 2024-12-07T18:20:21,729 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:21,729 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1c13826341e4651aa0de206d7aaeec5, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733595618259 2024-12-07T18:20:21,729 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/B is initiating minor compaction (all files) 2024-12-07T18:20:21,729 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/B in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:21,729 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/c0481bb10cf849a0bbeefe1aef2a75cd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d34784bca9a149038fbb44ac2c9fd4ef, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b529c82d12dd44298e87b580c9910964] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=36.7 K 2024-12-07T18:20:21,729 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting c0481bb10cf849a0bbeefe1aef2a75cd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733595618259 2024-12-07T18:20:21,729 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f3c78d314254fb1a0df8ec5d6aca5e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733595619392 2024-12-07T18:20:21,730 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d34784bca9a149038fbb44ac2c9fd4ef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733595619392 2024-12-07T18:20:21,730 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ff602de4a014fac95f94bf6f4700aa6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733595620510 2024-12-07T18:20:21,730 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting b529c82d12dd44298e87b580c9910964, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733595620510 2024-12-07T18:20:21,738 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#A#compaction#294 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:21,739 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/d7dcb3fac9004ae9890c16f6eec44e4b is 50, key is test_row_0/A:col10/1733595620510/Put/seqid=0 2024-12-07T18:20:21,741 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#B#compaction#295 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:21,742 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/fa283a802e854034a76f0949203cb071 is 50, key is test_row_0/B:col10/1733595620510/Put/seqid=0 2024-12-07T18:20:21,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742172_1348 (size=13085) 2024-12-07T18:20:21,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742171_1347 (size=13085) 2024-12-07T18:20:21,778 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/fa283a802e854034a76f0949203cb071 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/fa283a802e854034a76f0949203cb071 2024-12-07T18:20:21,779 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/d7dcb3fac9004ae9890c16f6eec44e4b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/d7dcb3fac9004ae9890c16f6eec44e4b 2024-12-07T18:20:21,786 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/B of 70dba895e74ad497e9ce7e920215ba59 into fa283a802e854034a76f0949203cb071(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:21,786 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:21,786 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/B, priority=13, startTime=1733595621727; duration=0sec 2024-12-07T18:20:21,786 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/A of 70dba895e74ad497e9ce7e920215ba59 into d7dcb3fac9004ae9890c16f6eec44e4b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:21,787 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:21,787 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/A, priority=13, startTime=1733595621727; duration=0sec 2024-12-07T18:20:21,787 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:21,787 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:B 2024-12-07T18:20:21,787 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:21,788 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:A 2024-12-07T18:20:21,788 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:21,788 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:21,789 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/C is initiating minor compaction (all files) 2024-12-07T18:20:21,789 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/C in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:21,789 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/ebdbe5f431d349beb63314eca96ba9b9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/219931b028344ed5ba645b5032909f06, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/5d7eef0cb98b47c0bbe537f3ef0ef156] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=36.7 K 2024-12-07T18:20:21,789 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ebdbe5f431d349beb63314eca96ba9b9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733595618259 2024-12-07T18:20:21,790 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 219931b028344ed5ba645b5032909f06, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733595619392 2024-12-07T18:20:21,790 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d7eef0cb98b47c0bbe537f3ef0ef156, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733595620510 2024-12-07T18:20:21,803 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#C#compaction#296 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:21,803 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/8e4f2052a0a340b19b71d28c02923cc2 is 50, key is test_row_0/C:col10/1733595620510/Put/seqid=0 2024-12-07T18:20:21,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742173_1349 (size=13085) 2024-12-07T18:20:21,816 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/8e4f2052a0a340b19b71d28c02923cc2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/8e4f2052a0a340b19b71d28c02923cc2 2024-12-07T18:20:21,821 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/C of 70dba895e74ad497e9ce7e920215ba59 into 8e4f2052a0a340b19b71d28c02923cc2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:21,821 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:21,821 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/C, priority=13, startTime=1733595621727; duration=0sec 2024-12-07T18:20:21,822 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:21,822 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:C 2024-12-07T18:20:21,838 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-07T18:20:21,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:21,839 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-07T18:20:21,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:21,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:21,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:21,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:21,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:21,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:21,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/702d98a4eadd495f9b9be316cd1fe038 is 50, key is test_row_0/A:col10/1733595620850/Put/seqid=0 2024-12-07T18:20:21,848 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742174_1350 (size=12301) 2024-12-07T18:20:21,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:21,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:21,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595681974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595681974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595681975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:21,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
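The RegionTooBusyException stack traces above are the server rejecting Put RPCs while the region's memstore is over its blocking limit; the HBase client retries these internally. The sketch below is a hedged illustration of what such a writer might look like, with the retry-related settings bounded explicitly; the column family, qualifier, value and the specific numbers are assumptions, not values from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // RegionTooBusyException is retried inside the client; these settings bound how
    // many attempts a single mutate makes and how long it may block overall.
    conf.setInt("hbase.client.retries.number", 15);            // illustrative value
    conf.setLong("hbase.client.operation.timeout", 120_000L);  // ms, illustrative value
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Blocks and retries while the region keeps answering "too busy";
      // gives up with an exception once the limits above are exhausted.
      table.put(put);
    }
  }
}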
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:21,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595681976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595682077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595682077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595682078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595682078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-07T18:20:22,250 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/702d98a4eadd495f9b9be316cd1fe038 2024-12-07T18:20:22,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/bb37fd7335504d0fa4d45f84c27b025d is 50, key is test_row_0/B:col10/1733595620850/Put/seqid=0 2024-12-07T18:20:22,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742175_1351 (size=12301) 2024-12-07T18:20:22,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595682279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595682279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595682282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595682281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595682583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
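The recurring "Over memstore limit=512.0 K" rejections come from the region-level blocking check: once a region's memstore grows past the flush size times the block multiplier, new mutations are refused until a flush completes. The snippet below is a sketch of the two settings involved, under the assumption that the 512 K figure corresponds to a 128 K flush size and the default multiplier of 4; the actual values configured by this test are not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a memstore once it reaches this many bytes (assumed 128 K here).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Reject writes (RegionTooBusyException) once the memstore exceeds flush.size * multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit (bytes): " + blockingLimit); // 524288 = 512.0 K
  }
}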
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595682583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595682585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:22,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595682586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:22,663 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/bb37fd7335504d0fa4d45f84c27b025d 2024-12-07T18:20:22,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/38429bcb4b314d528c012fd18b4a3114 is 50, key is test_row_0/C:col10/1733595620850/Put/seqid=0 2024-12-07T18:20:22,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742176_1352 (size=12301) 2024-12-07T18:20:23,075 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/38429bcb4b314d528c012fd18b4a3114 2024-12-07T18:20:23,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/702d98a4eadd495f9b9be316cd1fe038 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/702d98a4eadd495f9b9be316cd1fe038 2024-12-07T18:20:23,084 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/702d98a4eadd495f9b9be316cd1fe038, entries=150, sequenceid=355, filesize=12.0 K 2024-12-07T18:20:23,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/bb37fd7335504d0fa4d45f84c27b025d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/bb37fd7335504d0fa4d45f84c27b025d 2024-12-07T18:20:23,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:23,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595683088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:23,089 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/bb37fd7335504d0fa4d45f84c27b025d, entries=150, sequenceid=355, filesize=12.0 K 2024-12-07T18:20:23,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:23,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595683089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:23,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/38429bcb4b314d528c012fd18b4a3114 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/38429bcb4b314d528c012fd18b4a3114 2024-12-07T18:20:23,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:23,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595683091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:23,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:23,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595683092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:23,095 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/38429bcb4b314d528c012fd18b4a3114, entries=150, sequenceid=355, filesize=12.0 K 2024-12-07T18:20:23,095 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 70dba895e74ad497e9ce7e920215ba59 in 1256ms, sequenceid=355, compaction requested=false 2024-12-07T18:20:23,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:23,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:23,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-07T18:20:23,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-07T18:20:23,098 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-07T18:20:23,098 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0220 sec 2024-12-07T18:20:23,100 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.0270 sec 2024-12-07T18:20:23,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-07T18:20:23,179 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-07T18:20:23,180 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:23,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-07T18:20:23,182 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:23,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-07T18:20:23,182 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:23,183 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:23,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-07T18:20:23,334 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:23,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-07T18:20:23,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
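Procedure 85 above is the master-side FlushTableProcedure that fanned out the per-region FlushRegionProcedure (pid=86), and the "Operation: FLUSH ... procId: 85 completed" line is the client observing its completion. A flush like this can be requested from a client roughly as in the hedged sketch below; only the table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush all memstores of the table; in this build it runs as a
      // FlushTableProcedure with per-region subprocedures, matching the pid=85/86
      // (and later pid=87/88) entries in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}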
2024-12-07T18:20:23,335 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-07T18:20:23,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:23,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:23,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:23,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:23,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:23,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:23,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/9a6838fca70049c29f7ee01007c603d2 is 50, key is test_row_0/A:col10/1733595621971/Put/seqid=0 2024-12-07T18:20:23,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742177_1353 (size=12301) 2024-12-07T18:20:23,344 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/9a6838fca70049c29f7ee01007c603d2 2024-12-07T18:20:23,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/98a87a547c98446ea8198b494d913357 is 50, key is test_row_0/B:col10/1733595621971/Put/seqid=0 2024-12-07T18:20:23,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742178_1354 (size=12301) 2024-12-07T18:20:23,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-07T18:20:23,756 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=371 (bloomFilter=true), 
to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/98a87a547c98446ea8198b494d913357 2024-12-07T18:20:23,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/9bf859b84afe4da6badfbc2f778d990d is 50, key is test_row_0/C:col10/1733595621971/Put/seqid=0 2024-12-07T18:20:23,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742179_1355 (size=12301) 2024-12-07T18:20:23,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-07T18:20:24,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:24,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:24,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595684109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595684112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595684112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595684113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,169 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/9bf859b84afe4da6badfbc2f778d990d 2024-12-07T18:20:24,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/9a6838fca70049c29f7ee01007c603d2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/9a6838fca70049c29f7ee01007c603d2 2024-12-07T18:20:24,178 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/9a6838fca70049c29f7ee01007c603d2, entries=150, sequenceid=371, filesize=12.0 K 2024-12-07T18:20:24,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/98a87a547c98446ea8198b494d913357 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/98a87a547c98446ea8198b494d913357 2024-12-07T18:20:24,182 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/98a87a547c98446ea8198b494d913357, entries=150, sequenceid=371, filesize=12.0 K 2024-12-07T18:20:24,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/9bf859b84afe4da6badfbc2f778d990d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/9bf859b84afe4da6badfbc2f778d990d 2024-12-07T18:20:24,187 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/9bf859b84afe4da6badfbc2f778d990d, entries=150, sequenceid=371, filesize=12.0 K 2024-12-07T18:20:24,188 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 70dba895e74ad497e9ce7e920215ba59 in 853ms, sequenceid=371, compaction requested=true 2024-12-07T18:20:24,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:24,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:24,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-07T18:20:24,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-07T18:20:24,191 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-07T18:20:24,191 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0070 sec 2024-12-07T18:20:24,192 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.0110 sec 2024-12-07T18:20:24,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:24,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-07T18:20:24,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:24,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:24,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:24,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:24,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:24,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:24,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/567bfa10d8d3413cb5edaf7811ad4f50 is 50, key is test_row_0/A:col10/1733595624214/Put/seqid=0 2024-12-07T18:20:24,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742180_1356 (size=14741) 2024-12-07T18:20:24,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/567bfa10d8d3413cb5edaf7811ad4f50 2024-12-07T18:20:24,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595684226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595684228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595684229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595684232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/7335f8b739b140e2afdd29ab87cd7b26 is 50, key is test_row_0/B:col10/1733595624214/Put/seqid=0 2024-12-07T18:20:24,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742181_1357 (size=12301) 2024-12-07T18:20:24,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/7335f8b739b140e2afdd29ab87cd7b26 2024-12-07T18:20:24,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/59b442e31ce64864952b908ec2c1471e is 50, key is test_row_0/C:col10/1733595624214/Put/seqid=0 2024-12-07T18:20:24,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742182_1358 (size=12301) 2024-12-07T18:20:24,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-07T18:20:24,285 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-07T18:20:24,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 
{}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:24,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-12-07T18:20:24,289 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:24,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T18:20:24,289 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:24,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:24,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595684331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595684333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595684333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595684334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T18:20:24,442 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,442 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-07T18:20:24,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:24,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:24,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:24,443 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
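Editor's note: the repeated RegionTooBusyException entries above show writes being rejected while the memstore of region 70dba895e74ad497e9ce7e920215ba59 is over its 512.0 K blocking limit; callers are expected to back off and retry until the in-progress flush drains the memstore. Below is a minimal, hedged sketch of such a caller-side retry loop. The class name, table handle, row/column values and backoff numbers are illustrative assumptions, not taken from the test, and the stock HBase client may retry or wrap this exception internally rather than surface it directly.

// Hedged sketch: back off and retry a put that is rejected while the region's
// memstore is over its blocking limit (see the WARN/DEBUG pairs above).
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                 // illustrative starting backoff
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);               // may be rejected with RegionTooBusyException
                    break;                        // while the memstore is over its limit
                } catch (RegionTooBusyException busy) {
                    Thread.sleep(backoffMs);      // let the running flush drain the memstore
                    backoffMs *= 2;               // simple exponential backoff
                }
            }
        }
    }
}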
2024-12-07T18:20:24,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:24,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:24,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595684534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595684535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595684536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595684537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T18:20:24,595 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-07T18:20:24,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:24,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:24,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:24,596 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:24,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:24,660 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/59b442e31ce64864952b908ec2c1471e 2024-12-07T18:20:24,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/567bfa10d8d3413cb5edaf7811ad4f50 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/567bfa10d8d3413cb5edaf7811ad4f50 2024-12-07T18:20:24,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/567bfa10d8d3413cb5edaf7811ad4f50, entries=200, sequenceid=393, filesize=14.4 K 2024-12-07T18:20:24,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/7335f8b739b140e2afdd29ab87cd7b26 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7335f8b739b140e2afdd29ab87cd7b26 2024-12-07T18:20:24,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7335f8b739b140e2afdd29ab87cd7b26, entries=150, sequenceid=393, filesize=12.0 K 2024-12-07T18:20:24,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/59b442e31ce64864952b908ec2c1471e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/59b442e31ce64864952b908ec2c1471e 2024-12-07T18:20:24,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/59b442e31ce64864952b908ec2c1471e, entries=150, sequenceid=393, filesize=12.0 K 2024-12-07T18:20:24,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 70dba895e74ad497e9ce7e920215ba59 in 464ms, sequenceid=393, compaction requested=true 2024-12-07T18:20:24,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:24,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:24,680 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:20:24,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:24,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:24,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:24,680 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:20:24,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:24,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:24,682 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52428 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:20:24,682 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/A is initiating minor compaction (all files) 2024-12-07T18:20:24,682 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/A in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:24,683 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/d7dcb3fac9004ae9890c16f6eec44e4b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/702d98a4eadd495f9b9be316cd1fe038, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/9a6838fca70049c29f7ee01007c603d2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/567bfa10d8d3413cb5edaf7811ad4f50] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=51.2 K 2024-12-07T18:20:24,683 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7dcb3fac9004ae9890c16f6eec44e4b, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733595620510 2024-12-07T18:20:24,683 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 702d98a4eadd495f9b9be316cd1fe038, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733595620848 2024-12-07T18:20:24,693 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a6838fca70049c29f7ee01007c603d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733595621971 2024-12-07T18:20:24,693 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:20:24,694 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 567bfa10d8d3413cb5edaf7811ad4f50, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733595624107 2024-12-07T18:20:24,694 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/B is initiating minor compaction (all files) 2024-12-07T18:20:24,694 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/B in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
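Editor's note: the ExploringCompactionPolicy lines above select all four A-store files (12.8 K + 12.0 K + 12.0 K + 14.4 K, "4 files of size 52428") for a minor compaction. The sketch below shows the kind of size-ratio test a candidate selection must pass under such a policy: no file may dwarf the combined size of the others. The standalone method and the 1.2 ratio are assumptions for illustration, not HBase's actual implementation.

// Hedged sketch of a size-ratio check over a candidate compaction selection.
public final class CompactionRatioSketch {
    // Every file must be no larger than `ratio` times the sum of the other files.
    static boolean filesInRatio(double[] fileSizes, double ratio) {
        double total = 0;
        for (double s : fileSizes) {
            total += s;
        }
        for (double s : fileSizes) {
            if (s > (total - s) * ratio) {   // one file dwarfs the rest: reject this permutation
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes in KB taken from the Compactor(224) lines above for store A.
        double[] selectedA = {12.8, 12.0, 12.0, 14.4};
        // Prints true: 14.4 <= 1.2 * (12.8 + 12.0 + 12.0), and likewise for the others.
        System.out.println(filesInRatio(selectedA, 1.2));
    }
}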
2024-12-07T18:20:24,694 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/fa283a802e854034a76f0949203cb071, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/bb37fd7335504d0fa4d45f84c27b025d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/98a87a547c98446ea8198b494d913357, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7335f8b739b140e2afdd29ab87cd7b26] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=48.8 K 2024-12-07T18:20:24,694 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting fa283a802e854034a76f0949203cb071, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733595620510 2024-12-07T18:20:24,694 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting bb37fd7335504d0fa4d45f84c27b025d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733595620848 2024-12-07T18:20:24,695 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 98a87a547c98446ea8198b494d913357, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733595621971 2024-12-07T18:20:24,695 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7335f8b739b140e2afdd29ab87cd7b26, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733595624107 2024-12-07T18:20:24,703 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#A#compaction#306 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:24,703 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/94ed08c895674d52a64df22b1df680e4 is 50, key is test_row_0/A:col10/1733595624214/Put/seqid=0 2024-12-07T18:20:24,704 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#B#compaction#307 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:24,704 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/0bb29f7a52e64a238b652c10afcd61fe is 50, key is test_row_0/B:col10/1733595624214/Put/seqid=0 2024-12-07T18:20:24,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742183_1359 (size=13221) 2024-12-07T18:20:24,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742184_1360 (size=13221) 2024-12-07T18:20:24,719 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/94ed08c895674d52a64df22b1df680e4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/94ed08c895674d52a64df22b1df680e4 2024-12-07T18:20:24,725 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/A of 70dba895e74ad497e9ce7e920215ba59 into 94ed08c895674d52a64df22b1df680e4(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:24,725 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:24,725 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/A, priority=12, startTime=1733595624680; duration=0sec 2024-12-07T18:20:24,725 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:24,725 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:A 2024-12-07T18:20:24,725 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:20:24,727 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:20:24,727 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/C is initiating minor compaction (all files) 2024-12-07T18:20:24,727 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/C in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
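Annotation (not part of the test output): the "total limit is 50.00 MB/second" figure reported by PressureAwareThroughputController above is a configured ceiling on compaction write throughput, not a measured rate. A hypothetical sketch follows, assuming the standard upper/lower bound keys read by the pressure-aware compaction throughput controller; the byte values are illustrative and not what this test configured.

```java
// Hypothetical sketch of where a compaction throughput ceiling comes from.
// Key names are assumed to be the standard bound properties; values are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
        long upper = conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0L);
        System.out.printf("compaction throughput ceiling: %.2f MB/second%n",
            upper / 1024.0 / 1024.0);
    }
}
```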
2024-12-07T18:20:24,728 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/8e4f2052a0a340b19b71d28c02923cc2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/38429bcb4b314d528c012fd18b4a3114, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/9bf859b84afe4da6badfbc2f778d990d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/59b442e31ce64864952b908ec2c1471e] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=48.8 K 2024-12-07T18:20:24,728 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e4f2052a0a340b19b71d28c02923cc2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733595620510 2024-12-07T18:20:24,729 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38429bcb4b314d528c012fd18b4a3114, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733595620848 2024-12-07T18:20:24,729 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bf859b84afe4da6badfbc2f778d990d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733595621971 2024-12-07T18:20:24,730 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59b442e31ce64864952b908ec2c1471e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733595624107 2024-12-07T18:20:24,748 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-07T18:20:24,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:24,750 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-07T18:20:24,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:24,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:24,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:24,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:24,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:24,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:24,754 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#C#compaction#308 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:24,755 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/3083ef34cde240868e7ea23e42fd3032 is 50, key is test_row_0/C:col10/1733595624214/Put/seqid=0 2024-12-07T18:20:24,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/5e607a78a71c4097bf063563028f16da is 50, key is test_row_0/A:col10/1733595624228/Put/seqid=0 2024-12-07T18:20:24,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742185_1361 (size=13221) 2024-12-07T18:20:24,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742186_1362 (size=12301) 2024-12-07T18:20:24,762 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/5e607a78a71c4097bf063563028f16da 2024-12-07T18:20:24,765 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/3083ef34cde240868e7ea23e42fd3032 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/3083ef34cde240868e7ea23e42fd3032 2024-12-07T18:20:24,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/b82cbc8d4d9e4aeabcb053e89f42c435 is 50, key is test_row_0/B:col10/1733595624228/Put/seqid=0 2024-12-07T18:20:24,776 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/C of 70dba895e74ad497e9ce7e920215ba59 into 3083ef34cde240868e7ea23e42fd3032(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:24,776 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:24,776 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/C, priority=12, startTime=1733595624680; duration=0sec 2024-12-07T18:20:24,776 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:24,776 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:C 2024-12-07T18:20:24,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742187_1363 (size=12301) 2024-12-07T18:20:24,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:24,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:24,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595684852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595684853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595684855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595684855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T18:20:24,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595684956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595684957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595684958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:24,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:24,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595684959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,115 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/0bb29f7a52e64a238b652c10afcd61fe as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/0bb29f7a52e64a238b652c10afcd61fe 2024-12-07T18:20:25,120 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/B of 70dba895e74ad497e9ce7e920215ba59 into 0bb29f7a52e64a238b652c10afcd61fe(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:25,120 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:25,120 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/B, priority=12, startTime=1733595624680; duration=0sec 2024-12-07T18:20:25,120 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:25,120 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:B 2024-12-07T18:20:25,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595685159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595685160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595685160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595685161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,186 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/b82cbc8d4d9e4aeabcb053e89f42c435 2024-12-07T18:20:25,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/0c05722e95fc4dc9bb426727a136902d is 50, key is test_row_0/C:col10/1733595624228/Put/seqid=0 2024-12-07T18:20:25,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742188_1364 (size=12301) 2024-12-07T18:20:25,203 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/0c05722e95fc4dc9bb426727a136902d 2024-12-07T18:20:25,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/5e607a78a71c4097bf063563028f16da as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5e607a78a71c4097bf063563028f16da 2024-12-07T18:20:25,211 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5e607a78a71c4097bf063563028f16da, entries=150, sequenceid=408, filesize=12.0 K 2024-12-07T18:20:25,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/b82cbc8d4d9e4aeabcb053e89f42c435 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b82cbc8d4d9e4aeabcb053e89f42c435 2024-12-07T18:20:25,216 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b82cbc8d4d9e4aeabcb053e89f42c435, entries=150, sequenceid=408, filesize=12.0 K 2024-12-07T18:20:25,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/0c05722e95fc4dc9bb426727a136902d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/0c05722e95fc4dc9bb426727a136902d 2024-12-07T18:20:25,220 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/0c05722e95fc4dc9bb426727a136902d, entries=150, sequenceid=408, filesize=12.0 K 2024-12-07T18:20:25,221 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 70dba895e74ad497e9ce7e920215ba59 in 472ms, sequenceid=408, compaction requested=false 2024-12-07T18:20:25,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:25,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
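Annotation (not part of the test output): the repeated "Over memstore limit=512.0 K" rejections and the flush that just completed above are two sides of the same back-pressure mechanism: HRegion.checkResources blocks writes once a region's memstore exceeds roughly the flush size times the block multiplier. The sketch below uses the standard key names and their stock defaults; this test presumably lowers the flush size so the 512 K ceiling is reached quickly, so the numbers printed here are illustrative only.

```java
// Hypothetical sketch of the blocking-memstore calculation behind
// RegionTooBusyException. Key names are the standard ones; defaults shown
// are the stock values, not this test's settings.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;
        System.out.printf(
            "writes start failing with RegionTooBusyException above ~%d bytes%n",
            blockingLimit);
    }
}
```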
2024-12-07T18:20:25,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-07T18:20:25,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-07T18:20:25,223 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-07T18:20:25,223 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 932 msec 2024-12-07T18:20:25,225 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 937 msec 2024-12-07T18:20:25,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T18:20:25,393 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-07T18:20:25,394 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-12-07T18:20:25,395 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-07T18:20:25,396 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:25,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:25,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:25,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:20:25,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:25,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:25,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:25,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
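Annotation (not part of the test output): procedures 89 and 91 above are master-side FlushTableProcedures kicked off by client flush requests ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of how such a request is issued through the public Admin API; the connection setup is illustrative, while the table name is taken from the log.

```java
// Hypothetical client-side flush request; blocks until the master-side
// FlushTableProcedure completes.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```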
2024-12-07T18:20:25,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:25,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:25,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/08ddbcd9efbd4272b41473d0bf31e8b6 is 50, key is test_row_0/A:col10/1733595624852/Put/seqid=0 2024-12-07T18:20:25,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742189_1365 (size=12301) 2024-12-07T18:20:25,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/08ddbcd9efbd4272b41473d0bf31e8b6 2024-12-07T18:20:25,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595685477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595685480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595685480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595685481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/5c08f260a4794e7994858aae3dfab6ed is 50, key is test_row_0/B:col10/1733595624852/Put/seqid=0 2024-12-07T18:20:25,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742190_1366 (size=12301) 2024-12-07T18:20:25,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-07T18:20:25,548 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,548 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-07T18:20:25,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:25,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:25,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
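Annotation (not part of the test output): the writer threads whose mutations are being rejected above would observe these RegionTooBusyExceptions as IOExceptions on their put calls. A hypothetical retry sketch around a single put follows; note that the stock HBase client also retries such exceptions internally, so whether the exception reaches application code at all depends on the client retry settings. Row, family, and qualifier are taken from the log entries; the value and back-off are illustrative.

```java
// Hypothetical writer-side handling of a region that is over its memstore limit.
// Illustrative only: the synchronous client normally retries internally.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPut {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break; // accepted
                } catch (RegionTooBusyException busy) {
                    // Region memstore is over its blocking limit; wait for a flush to drain it.
                    Thread.sleep(200L * (attempt + 1));
                }
            }
            // If all attempts failed, the put is simply dropped in this sketch.
        }
    }
}
```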
2024-12-07T18:20:25,549 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:25,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:25,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:25,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595685582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595685584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595685584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595685585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-07T18:20:25,701 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-07T18:20:25,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:25,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:25,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:25,701 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:25,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:25,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:25,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595685784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595685786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595685787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:25,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595685787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,853 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:25,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-07T18:20:25,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:25,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:25,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:25,854 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:25,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:25,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:25,889 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/5c08f260a4794e7994858aae3dfab6ed 2024-12-07T18:20:25,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/cc6b35186dd0469aaedc60720fd10e6b is 50, key is test_row_0/C:col10/1733595624852/Put/seqid=0 2024-12-07T18:20:25,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742191_1367 (size=12301) 2024-12-07T18:20:25,911 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/cc6b35186dd0469aaedc60720fd10e6b 2024-12-07T18:20:25,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/08ddbcd9efbd4272b41473d0bf31e8b6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/08ddbcd9efbd4272b41473d0bf31e8b6 2024-12-07T18:20:25,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/08ddbcd9efbd4272b41473d0bf31e8b6, entries=150, sequenceid=434, filesize=12.0 K 2024-12-07T18:20:25,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/5c08f260a4794e7994858aae3dfab6ed as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/5c08f260a4794e7994858aae3dfab6ed 2024-12-07T18:20:25,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/5c08f260a4794e7994858aae3dfab6ed, entries=150, sequenceid=434, filesize=12.0 K 
2024-12-07T18:20:25,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/cc6b35186dd0469aaedc60720fd10e6b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cc6b35186dd0469aaedc60720fd10e6b 2024-12-07T18:20:25,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cc6b35186dd0469aaedc60720fd10e6b, entries=150, sequenceid=434, filesize=12.0 K 2024-12-07T18:20:25,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for 70dba895e74ad497e9ce7e920215ba59 in 465ms, sequenceid=434, compaction requested=true 2024-12-07T18:20:25,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:25,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:25,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:25,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:25,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:25,930 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:25,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:25,930 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:25,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:25,931 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:25,931 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:25,932 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 
70dba895e74ad497e9ce7e920215ba59/A is initiating minor compaction (all files) 2024-12-07T18:20:25,932 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/B is initiating minor compaction (all files) 2024-12-07T18:20:25,932 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/A in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:25,932 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/94ed08c895674d52a64df22b1df680e4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5e607a78a71c4097bf063563028f16da, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/08ddbcd9efbd4272b41473d0bf31e8b6] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=36.9 K 2024-12-07T18:20:25,932 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/B in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:25,932 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/0bb29f7a52e64a238b652c10afcd61fe, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b82cbc8d4d9e4aeabcb053e89f42c435, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/5c08f260a4794e7994858aae3dfab6ed] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=36.9 K 2024-12-07T18:20:25,932 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bb29f7a52e64a238b652c10afcd61fe, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733595624107 2024-12-07T18:20:25,932 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94ed08c895674d52a64df22b1df680e4, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733595624107 2024-12-07T18:20:25,933 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting b82cbc8d4d9e4aeabcb053e89f42c435, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733595624218 2024-12-07T18:20:25,933 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e607a78a71c4097bf063563028f16da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733595624218 2024-12-07T18:20:25,933 DEBUG 
[RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c08f260a4794e7994858aae3dfab6ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733595624852 2024-12-07T18:20:25,933 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08ddbcd9efbd4272b41473d0bf31e8b6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733595624852 2024-12-07T18:20:25,942 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#A#compaction#315 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:25,942 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#B#compaction#316 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:25,942 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/30ff459099634d36a257cce6b75be197 is 50, key is test_row_0/A:col10/1733595624852/Put/seqid=0 2024-12-07T18:20:25,943 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/ea8af9adb4ec4a10816ca722c41530be is 50, key is test_row_0/B:col10/1733595624852/Put/seqid=0 2024-12-07T18:20:25,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742193_1369 (size=13323) 2024-12-07T18:20:25,958 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/ea8af9adb4ec4a10816ca722c41530be as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ea8af9adb4ec4a10816ca722c41530be 2024-12-07T18:20:25,965 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/B of 70dba895e74ad497e9ce7e920215ba59 into ea8af9adb4ec4a10816ca722c41530be(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:25,965 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:25,965 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/B, priority=13, startTime=1733595625930; duration=0sec 2024-12-07T18:20:25,966 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:25,966 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:B 2024-12-07T18:20:25,966 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:25,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742192_1368 (size=13323) 2024-12-07T18:20:25,969 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:25,969 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 70dba895e74ad497e9ce7e920215ba59/C is initiating minor compaction (all files) 2024-12-07T18:20:25,970 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 70dba895e74ad497e9ce7e920215ba59/C in TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:25,970 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/3083ef34cde240868e7ea23e42fd3032, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/0c05722e95fc4dc9bb426727a136902d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cc6b35186dd0469aaedc60720fd10e6b] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp, totalSize=36.9 K 2024-12-07T18:20:25,970 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 3083ef34cde240868e7ea23e42fd3032, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733595624107 2024-12-07T18:20:25,971 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c05722e95fc4dc9bb426727a136902d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733595624218 2024-12-07T18:20:25,971 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting cc6b35186dd0469aaedc60720fd10e6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733595624852 2024-12-07T18:20:25,972 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/30ff459099634d36a257cce6b75be197 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/30ff459099634d36a257cce6b75be197 2024-12-07T18:20:25,978 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/A of 70dba895e74ad497e9ce7e920215ba59 into 30ff459099634d36a257cce6b75be197(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:25,979 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:25,979 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/A, priority=13, startTime=1733595625930; duration=0sec 2024-12-07T18:20:25,979 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:25,979 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:A 2024-12-07T18:20:25,982 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 70dba895e74ad497e9ce7e920215ba59#C#compaction#317 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:25,983 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/d1e81061b0694e089d7df7a07914594e is 50, key is test_row_0/C:col10/1733595624852/Put/seqid=0 2024-12-07T18:20:25,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742194_1370 (size=13323) 2024-12-07T18:20:25,994 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/d1e81061b0694e089d7df7a07914594e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/d1e81061b0694e089d7df7a07914594e 2024-12-07T18:20:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-07T18:20:25,999 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 70dba895e74ad497e9ce7e920215ba59/C of 70dba895e74ad497e9ce7e920215ba59 into d1e81061b0694e089d7df7a07914594e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:25,999 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:25,999 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59., storeName=70dba895e74ad497e9ce7e920215ba59/C, priority=13, startTime=1733595625930; duration=0sec 2024-12-07T18:20:25,999 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:26,000 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:C 2024-12-07T18:20:26,006 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,006 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-07T18:20:26,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:26,007 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-07T18:20:26,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:26,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:26,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:26,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:26,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:26,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:26,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/6bde1e6c7897471f807d8c8a9e678ba2 is 50, key is test_row_0/A:col10/1733595625474/Put/seqid=0 2024-12-07T18:20:26,016 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742195_1371 (size=12301) 2024-12-07T18:20:26,019 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/6bde1e6c7897471f807d8c8a9e678ba2 2024-12-07T18:20:26,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/cbc4c8ab68474d1d893864a9bd2ba893 is 50, key is test_row_0/B:col10/1733595625474/Put/seqid=0 2024-12-07T18:20:26,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742196_1372 (size=12301) 2024-12-07T18:20:26,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:26,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:26,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595686102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595686103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595686104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595686104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595686205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595686206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595686207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595686207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595686409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595686409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595686409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595686410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,432 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/cbc4c8ab68474d1d893864a9bd2ba893 2024-12-07T18:20:26,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/459bd0c7dbe54f2fa88d70a7cd6a1da9 is 50, key is test_row_0/C:col10/1733595625474/Put/seqid=0 2024-12-07T18:20:26,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742197_1373 (size=12301) 2024-12-07T18:20:26,457 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/459bd0c7dbe54f2fa88d70a7cd6a1da9 2024-12-07T18:20:26,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/6bde1e6c7897471f807d8c8a9e678ba2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/6bde1e6c7897471f807d8c8a9e678ba2 2024-12-07T18:20:26,466 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/6bde1e6c7897471f807d8c8a9e678ba2, entries=150, sequenceid=451, filesize=12.0 K 2024-12-07T18:20:26,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/cbc4c8ab68474d1d893864a9bd2ba893 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/cbc4c8ab68474d1d893864a9bd2ba893 2024-12-07T18:20:26,471 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/cbc4c8ab68474d1d893864a9bd2ba893, entries=150, sequenceid=451, filesize=12.0 K 2024-12-07T18:20:26,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/459bd0c7dbe54f2fa88d70a7cd6a1da9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/459bd0c7dbe54f2fa88d70a7cd6a1da9 2024-12-07T18:20:26,476 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/459bd0c7dbe54f2fa88d70a7cd6a1da9, entries=150, sequenceid=451, filesize=12.0 K 2024-12-07T18:20:26,477 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 70dba895e74ad497e9ce7e920215ba59 in 470ms, sequenceid=451, compaction requested=false 2024-12-07T18:20:26,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:26,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
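The RegionTooBusyException warnings above come from HRegion.checkResources(): while the flush for pid=92 is still writing its HFiles, the region's memstore sits over its 512.0 K blocking limit, so each incoming Mutate is rejected with a WARN plus a matching CallRunner DEBUG entry. Below is a minimal client-side sketch of what handling that rejection can look like; the table, row, and column family names are taken from this log, but the explicit retry loop is illustrative only, since the standard HBase client already retries RegionTooBusyException internally before surfacing it to the caller.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("dummy"));
          int attempts = 0;
          while (true) {
            try {
              table.put(put);                 // rejected while the memstore is over its blocking limit
              break;
            } catch (RegionTooBusyException e) {
              if (++attempts >= 5) throw e;   // give up after a few tries
              Thread.sleep(100L * attempts);  // back off so the in-flight flush can drain the memstore
            }
          }
        }
      }
    }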
2024-12-07T18:20:26,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-12-07T18:20:26,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-12-07T18:20:26,481 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-07T18:20:26,481 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0830 sec 2024-12-07T18:20:26,483 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 1.0880 sec 2024-12-07T18:20:26,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-07T18:20:26,499 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-07T18:20:26,500 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:26,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees 2024-12-07T18:20:26,501 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:26,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-07T18:20:26,502 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:26,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:26,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-07T18:20:26,654 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-07T18:20:26,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
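Each FLUSH round in this log (procId 91, then 93) follows the same shape: the client asks the master to flush the table, the master stores a FlushTableProcedure, dispatches a FlushRegionProcedure subprocedure to the region server, and the client polls "Checking to see if procedure is done" until it completes. With the usual HBase Admin API, that whole sequence can be driven by a single call, as in the hedged sketch below; the exact code path used by AcidGuaranteesTestTool is not visible in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the master runs a
          // FlushTableProcedure (pid=91/93 above) and the call returns once it finishes.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }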
2024-12-07T18:20:26,654 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-07T18:20:26,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:26,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:26,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:26,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:26,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:26,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:26,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/83396209dfba4b16926636761fafaf31 is 50, key is test_row_0/A:col10/1733595626103/Put/seqid=0 2024-12-07T18:20:26,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742198_1374 (size=12301) 2024-12-07T18:20:26,665 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=473 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/83396209dfba4b16926636761fafaf31 2024-12-07T18:20:26,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/362eda73e85a401882500bffdb29a15a is 50, key is test_row_0/B:col10/1733595626103/Put/seqid=0 2024-12-07T18:20:26,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742199_1375 (size=12301) 2024-12-07T18:20:26,681 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=473 (bloomFilter=true), 
to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/362eda73e85a401882500bffdb29a15a 2024-12-07T18:20:26,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/6289d5ced001443eb11d928841ceb270 is 50, key is test_row_0/C:col10/1733595626103/Put/seqid=0 2024-12-07T18:20:26,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742200_1376 (size=12301) 2024-12-07T18:20:26,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:26,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. as already flushing 2024-12-07T18:20:26,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595686720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595686721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595686722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595686722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-07T18:20:26,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595686823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595686825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595686825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:26,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:26,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595686825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:27,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:27,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46792 deadline: 1733595687027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:27,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:27,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46820 deadline: 1733595687028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:27,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:27,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733595687028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:27,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:27,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46832 deadline: 1733595687029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:27,096 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=473 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/6289d5ced001443eb11d928841ceb270 2024-12-07T18:20:27,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/83396209dfba4b16926636761fafaf31 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/83396209dfba4b16926636761fafaf31 2024-12-07T18:20:27,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-07T18:20:27,104 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/83396209dfba4b16926636761fafaf31, entries=150, sequenceid=473, filesize=12.0 K 2024-12-07T18:20:27,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/362eda73e85a401882500bffdb29a15a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/362eda73e85a401882500bffdb29a15a 2024-12-07T18:20:27,109 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/362eda73e85a401882500bffdb29a15a, entries=150, sequenceid=473, filesize=12.0 K 2024-12-07T18:20:27,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/6289d5ced001443eb11d928841ceb270 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/6289d5ced001443eb11d928841ceb270 2024-12-07T18:20:27,113 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/6289d5ced001443eb11d928841ceb270, entries=150, sequenceid=473, filesize=12.0 K 2024-12-07T18:20:27,115 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 70dba895e74ad497e9ce7e920215ba59 in 461ms, sequenceid=473, compaction requested=true 2024-12-07T18:20:27,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:27,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
2024-12-07T18:20:27,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=94 2024-12-07T18:20:27,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=94 2024-12-07T18:20:27,117 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-07T18:20:27,118 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 614 msec 2024-12-07T18:20:27,119 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees in 618 msec 2024-12-07T18:20:27,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:27,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-07T18:20:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:27,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/dbcbfb120851411c8d43c733583eda5f is 50, key is test_row_0/A:col10/1733595627208/Put/seqid=0 2024-12-07T18:20:27,216 DEBUG [Thread-1276 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10be4157 to 127.0.0.1:56016 2024-12-07T18:20:27,216 DEBUG [Thread-1276 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:27,217 DEBUG [Thread-1263 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x13b0002b to 127.0.0.1:56016 2024-12-07T18:20:27,217 DEBUG [Thread-1263 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:27,218 DEBUG [Thread-1268 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b377948 to 127.0.0.1:56016 2024-12-07T18:20:27,218 DEBUG [Thread-1268 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:27,218 DEBUG [Thread-1274 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2bd6a663 to 127.0.0.1:56016 2024-12-07T18:20:27,218 DEBUG [Thread-1274 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:27,219 DEBUG [Thread-1272 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b123525 to 
127.0.0.1:56016 2024-12-07T18:20:27,219 DEBUG [Thread-1272 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:27,219 DEBUG [Thread-1270 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2a9f805a to 127.0.0.1:56016 2024-12-07T18:20:27,219 DEBUG [Thread-1270 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:27,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742201_1377 (size=12301) 2024-12-07T18:20:27,332 DEBUG [Thread-1265 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1703a605 to 127.0.0.1:56016 2024-12-07T18:20:27,332 DEBUG [Thread-1261 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3cd6e3ed to 127.0.0.1:56016 2024-12-07T18:20:27,332 DEBUG [Thread-1265 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:27,332 DEBUG [Thread-1261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:27,334 DEBUG [Thread-1259 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5cc79dd6 to 127.0.0.1:56016 2024-12-07T18:20:27,334 DEBUG [Thread-1259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:27,334 DEBUG [Thread-1257 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4becc07d to 127.0.0.1:56016 2024-12-07T18:20:27,334 DEBUG [Thread-1257 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:27,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-07T18:20:27,605 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 103 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 15 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7253 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7094 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6785 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7260 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7103 2024-12-07T18:20:27,605 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-07T18:20:27,605 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-07T18:20:27,605 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4b01e9bb to 127.0.0.1:56016 2024-12-07T18:20:27,605 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:27,606 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-07T18:20:27,606 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-07T18:20:27,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:27,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-07T18:20:27,609 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595627608"}]},"ts":"1733595627608"} 2024-12-07T18:20:27,609 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-07T18:20:27,611 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-07T18:20:27,612 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-07T18:20:27,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=70dba895e74ad497e9ce7e920215ba59, UNASSIGN}] 2024-12-07T18:20:27,613 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=70dba895e74ad497e9ce7e920215ba59, UNASSIGN 2024-12-07T18:20:27,614 INFO [PEWorker-2 {}] 
assignment.RegionStateStore(202): pid=97 updating hbase:meta row=70dba895e74ad497e9ce7e920215ba59, regionState=CLOSING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:27,615 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T18:20:27,615 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; CloseRegionProcedure 70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:20:27,621 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/dbcbfb120851411c8d43c733583eda5f 2024-12-07T18:20:27,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/ba68295da6e04510bcc85f638d44537c is 50, key is test_row_0/B:col10/1733595627208/Put/seqid=0 2024-12-07T18:20:27,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742202_1378 (size=12301) 2024-12-07T18:20:27,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-07T18:20:27,766 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:27,766 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(124): Close 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:27,766 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T18:20:27,766 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1681): Closing 70dba895e74ad497e9ce7e920215ba59, disabling compactions & flushes 2024-12-07T18:20:27,767 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
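At this point the master has stored DisableTableProcedure pid=95, scheduled the close of the table's single region via pid=96–98, and the region server has begun closing 70dba895e74ad497e9ce7e920215ba59, which forces the final memstore flush seen in the following entries. A minimal sketch of the client-side call that starts this chain, again using only the public Admin API (class name and configuration setup are assumptions for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // disableTable stores a DisableTableProcedure on the master (pid=95 in the log);
      // the master then unassigns each region, and each region server flushes and
      // closes its region before reporting the unassign as done.
      admin.disableTable(table);
      // The call returns once the procedure completes, so the state check below should print true.
      System.out.println("disabled: " + admin.isTableDisabled(table));
    }
  }
}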
2024-12-07T18:20:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-07T18:20:28,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/ba68295da6e04510bcc85f638d44537c 2024-12-07T18:20:28,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/02d742d5136349f6ab7c05a7ff22980d is 50, key is test_row_0/C:col10/1733595627208/Put/seqid=0 2024-12-07T18:20:28,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742203_1379 (size=12301) 2024-12-07T18:20:28,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-07T18:20:28,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/02d742d5136349f6ab7c05a7ff22980d 2024-12-07T18:20:28,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/dbcbfb120851411c8d43c733583eda5f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/dbcbfb120851411c8d43c733583eda5f 2024-12-07T18:20:28,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/dbcbfb120851411c8d43c733583eda5f, entries=150, sequenceid=489, filesize=12.0 K 2024-12-07T18:20:28,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/ba68295da6e04510bcc85f638d44537c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ba68295da6e04510bcc85f638d44537c 2024-12-07T18:20:28,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ba68295da6e04510bcc85f638d44537c, entries=150, sequenceid=489, filesize=12.0 K 2024-12-07T18:20:28,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/02d742d5136349f6ab7c05a7ff22980d as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/02d742d5136349f6ab7c05a7ff22980d 2024-12-07T18:20:28,456 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/02d742d5136349f6ab7c05a7ff22980d, entries=150, sequenceid=489, filesize=12.0 K 2024-12-07T18:20:28,457 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=46.96 KB/48090 for 70dba895e74ad497e9ce7e920215ba59 in 1248ms, sequenceid=489, compaction requested=true 2024-12-07T18:20:28,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:28,457 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:28,457 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:28,457 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. after waiting 0 ms 2024-12-07T18:20:28,457 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:28,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:28,457 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. because compaction request was cancelled 2024-12-07T18:20:28,457 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:A 2024-12-07T18:20:28,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:28,457 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(2837): Flushing 70dba895e74ad497e9ce7e920215ba59 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-07T18:20:28,458 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 
because compaction request was cancelled 2024-12-07T18:20:28,458 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:B 2024-12-07T18:20:28,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:B, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:28,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:28,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 70dba895e74ad497e9ce7e920215ba59:C, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:28,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:28,458 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. because compaction request was cancelled 2024-12-07T18:20:28,458 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 70dba895e74ad497e9ce7e920215ba59:C 2024-12-07T18:20:28,458 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=A 2024-12-07T18:20:28,458 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:28,458 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=B 2024-12-07T18:20:28,458 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:28,458 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 70dba895e74ad497e9ce7e920215ba59, store=C 2024-12-07T18:20:28,458 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:28,461 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/4f682eab218b43b192826bb41221641c is 50, key is test_row_0/A:col10/1733595627333/Put/seqid=0 2024-12-07T18:20:28,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742204_1380 (size=12301) 2024-12-07T18:20:28,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=95 2024-12-07T18:20:28,866 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/4f682eab218b43b192826bb41221641c 2024-12-07T18:20:28,872 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/d6e71f807f9e415db093463d930fa3e1 is 50, key is test_row_0/B:col10/1733595627333/Put/seqid=0 2024-12-07T18:20:28,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742205_1381 (size=12301) 2024-12-07T18:20:29,276 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/d6e71f807f9e415db093463d930fa3e1 2024-12-07T18:20:29,282 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/4cee877893b641ed92740a94863b02c7 is 50, key is test_row_0/C:col10/1733595627333/Put/seqid=0 2024-12-07T18:20:29,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742206_1382 (size=12301) 2024-12-07T18:20:29,686 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/4cee877893b641ed92740a94863b02c7 2024-12-07T18:20:29,690 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/A/4f682eab218b43b192826bb41221641c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/4f682eab218b43b192826bb41221641c 2024-12-07T18:20:29,693 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/4f682eab218b43b192826bb41221641c, entries=150, sequenceid=499, filesize=12.0 K 2024-12-07T18:20:29,694 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/B/d6e71f807f9e415db093463d930fa3e1 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d6e71f807f9e415db093463d930fa3e1 2024-12-07T18:20:29,696 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d6e71f807f9e415db093463d930fa3e1, entries=150, sequenceid=499, filesize=12.0 K 2024-12-07T18:20:29,697 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/.tmp/C/4cee877893b641ed92740a94863b02c7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/4cee877893b641ed92740a94863b02c7 2024-12-07T18:20:29,700 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/4cee877893b641ed92740a94863b02c7, entries=150, sequenceid=499, filesize=12.0 K 2024-12-07T18:20:29,701 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 70dba895e74ad497e9ce7e920215ba59 in 1244ms, sequenceid=499, compaction requested=true 2024-12-07T18:20:29,701 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1a8bb5623016478b81ff99781962646b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/61a1cbb1f37a4fa9846185aaa4de049d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/dde3cba7d3aa4042828308c2156a4225, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5cf47e1dfecb4a898546b1cd8d266159, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1c8eb272b88d4cc8b1b93717a5d1b2d2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/0f56649669a34c8da8fa42d4ef2f9421, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7fd3387e312d48f9a80f23d00a193946, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a673fee586e64a51bd545fa643eaba1d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/e6daba5454d6497bafec8c0ca0cd9271, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/c70a1091f7ce48aa90e3cdc41dc4ae8c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/e65cd6cdfab54408852fbd01f30cad6a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/90ff71d59e5846b6acd16ec59aab3b1b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/4ce8b9c7c85d4b568ffef0feb4add935, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/729ddd3c4613454cadee0d53861d29b5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/ffaf8652e96b4065ac3e3491d75a03a8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/c715d2f690444b1199865e909b921713, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7b37034e4f924d56a34e8bd7d768e0c3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a83bfb90b7d44c7d96c8226edb16eefc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/13d7e9c734094882890f7905e9cadd16, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/6d528144760a4335bc7d3176adff329c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/b1c13826341e4651aa0de206d7aaeec5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/0f3c78d314254fb1a0df8ec5d6aca5e2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/d7dcb3fac9004ae9890c16f6eec44e4b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5ff602de4a014fac95f94bf6f4700aa6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/702d98a4eadd495f9b9be316cd1fe038, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/9a6838fca70049c29f7ee01007c603d2, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/567bfa10d8d3413cb5edaf7811ad4f50, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/94ed08c895674d52a64df22b1df680e4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5e607a78a71c4097bf063563028f16da, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/08ddbcd9efbd4272b41473d0bf31e8b6] to archive 2024-12-07T18:20:29,702 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:20:29,704 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1a8bb5623016478b81ff99781962646b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1a8bb5623016478b81ff99781962646b 2024-12-07T18:20:29,705 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/61a1cbb1f37a4fa9846185aaa4de049d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/61a1cbb1f37a4fa9846185aaa4de049d 2024-12-07T18:20:29,705 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/dde3cba7d3aa4042828308c2156a4225 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/dde3cba7d3aa4042828308c2156a4225 2024-12-07T18:20:29,707 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5cf47e1dfecb4a898546b1cd8d266159 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5cf47e1dfecb4a898546b1cd8d266159 2024-12-07T18:20:29,708 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1c8eb272b88d4cc8b1b93717a5d1b2d2 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/1c8eb272b88d4cc8b1b93717a5d1b2d2 2024-12-07T18:20:29,708 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/0f56649669a34c8da8fa42d4ef2f9421 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/0f56649669a34c8da8fa42d4ef2f9421 2024-12-07T18:20:29,709 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7fd3387e312d48f9a80f23d00a193946 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7fd3387e312d48f9a80f23d00a193946 2024-12-07T18:20:29,710 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a673fee586e64a51bd545fa643eaba1d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a673fee586e64a51bd545fa643eaba1d 2024-12-07T18:20:29,711 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/e6daba5454d6497bafec8c0ca0cd9271 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/e6daba5454d6497bafec8c0ca0cd9271 2024-12-07T18:20:29,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-07T18:20:29,712 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/c70a1091f7ce48aa90e3cdc41dc4ae8c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/c70a1091f7ce48aa90e3cdc41dc4ae8c 2024-12-07T18:20:29,713 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/e65cd6cdfab54408852fbd01f30cad6a to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/e65cd6cdfab54408852fbd01f30cad6a 2024-12-07T18:20:29,714 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/90ff71d59e5846b6acd16ec59aab3b1b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/90ff71d59e5846b6acd16ec59aab3b1b 2024-12-07T18:20:29,714 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/4ce8b9c7c85d4b568ffef0feb4add935 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/4ce8b9c7c85d4b568ffef0feb4add935 2024-12-07T18:20:29,715 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/729ddd3c4613454cadee0d53861d29b5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/729ddd3c4613454cadee0d53861d29b5 2024-12-07T18:20:29,716 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/ffaf8652e96b4065ac3e3491d75a03a8 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/ffaf8652e96b4065ac3e3491d75a03a8 2024-12-07T18:20:29,717 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/c715d2f690444b1199865e909b921713 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/c715d2f690444b1199865e909b921713 2024-12-07T18:20:29,718 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7b37034e4f924d56a34e8bd7d768e0c3 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/7b37034e4f924d56a34e8bd7d768e0c3 2024-12-07T18:20:29,718 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a83bfb90b7d44c7d96c8226edb16eefc to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/a83bfb90b7d44c7d96c8226edb16eefc 2024-12-07T18:20:29,719 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/13d7e9c734094882890f7905e9cadd16 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/13d7e9c734094882890f7905e9cadd16 2024-12-07T18:20:29,720 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/6d528144760a4335bc7d3176adff329c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/6d528144760a4335bc7d3176adff329c 2024-12-07T18:20:29,721 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/b1c13826341e4651aa0de206d7aaeec5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/b1c13826341e4651aa0de206d7aaeec5 2024-12-07T18:20:29,722 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/0f3c78d314254fb1a0df8ec5d6aca5e2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/0f3c78d314254fb1a0df8ec5d6aca5e2 2024-12-07T18:20:29,723 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/d7dcb3fac9004ae9890c16f6eec44e4b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/d7dcb3fac9004ae9890c16f6eec44e4b 2024-12-07T18:20:29,724 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5ff602de4a014fac95f94bf6f4700aa6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5ff602de4a014fac95f94bf6f4700aa6 2024-12-07T18:20:29,725 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/702d98a4eadd495f9b9be316cd1fe038 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/702d98a4eadd495f9b9be316cd1fe038 2024-12-07T18:20:29,725 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/9a6838fca70049c29f7ee01007c603d2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/9a6838fca70049c29f7ee01007c603d2 2024-12-07T18:20:29,726 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/567bfa10d8d3413cb5edaf7811ad4f50 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/567bfa10d8d3413cb5edaf7811ad4f50 2024-12-07T18:20:29,727 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/94ed08c895674d52a64df22b1df680e4 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/94ed08c895674d52a64df22b1df680e4 2024-12-07T18:20:29,728 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5e607a78a71c4097bf063563028f16da to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/5e607a78a71c4097bf063563028f16da 2024-12-07T18:20:29,729 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/08ddbcd9efbd4272b41473d0bf31e8b6 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/08ddbcd9efbd4272b41473d0bf31e8b6 2024-12-07T18:20:29,730 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/33d23cd461454e1da364e0509469a36e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/e0ea61c624ab47bc89a14a07c552c0bd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/c2390ceb6a4b49118dd9fb2e390616cc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ecfe9740d843440b87037f42be098100, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/0c75e8a4b05040908ea4c9b59b5c4f2a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/4525ee287e174411820e04eb62145c9c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/cc75f6c5ddad4533a224afb9b8ad43b1, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d0475d48e9844f70977650acb9e292d3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/1a217e2c855a4d13b1535d732d0a5223, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/704c7872beba45e79a5f04288aba6805, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/9d831adf30b14549aac84b9d2359c373, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/bae62eb3d6ea4198bb2aca169869ae11, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/4a9eb4de357a4a61b81bf25ce8f942bd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/61c6e9aa8f6549cebd74e9892c407634, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ad821d71b37742fbb9d9924618342cac, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/55780857107f42cd84cd7e248282e12d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/607391c719f243cd8790e4f9bbce4fd1, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7916d06a546e439894cfd022d00ef214, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/3903fdf127974fa89ce0fa4a138a166c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/c0481bb10cf849a0bbeefe1aef2a75cd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/1b362d7ad63a44008b364ce774589249, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d34784bca9a149038fbb44ac2c9fd4ef, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/fa283a802e854034a76f0949203cb071, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b529c82d12dd44298e87b580c9910964, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/bb37fd7335504d0fa4d45f84c27b025d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/98a87a547c98446ea8198b494d913357, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/0bb29f7a52e64a238b652c10afcd61fe, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7335f8b739b140e2afdd29ab87cd7b26, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b82cbc8d4d9e4aeabcb053e89f42c435, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/5c08f260a4794e7994858aae3dfab6ed] to archive 2024-12-07T18:20:29,731 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
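The StoreCloser entries above and below show that compacted store files are not deleted on region close: HFileArchiver moves each file from the data/default/TestAcidGuarantees/... tree to the parallel archive/data/default/TestAcidGuarantees/... tree. The following is an illustrative sketch of that path transformation using the plain Hadoop FileSystem API; it is not HBase's HFileArchiver implementation, and the method name and parameters are hypothetical. The example values in main are copied from the log above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveStoreFileSketch {
  // Illustrative only: mirrors the data/ -> archive/ move visible in the log.
  // rootDir, table, region, family and fileName are hypothetical parameters.
  static Path archiveStoreFile(FileSystem fs, Path rootDir, String table,
                               String region, String family, String fileName) throws Exception {
    Path source = new Path(rootDir, "data/default/" + table + "/" + region + "/" + family + "/" + fileName);
    Path target = new Path(rootDir, "archive/data/default/" + table + "/" + region + "/" + family + "/" + fileName);
    fs.mkdirs(target.getParent());       // make sure the archive directory exists
    if (!fs.rename(source, target)) {    // move instead of delete, so the file stays recoverable
      throw new java.io.IOException("could not archive " + source);
    }
    return target;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path archived = archiveStoreFile(fs,
        new Path("hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7"),
        "TestAcidGuarantees", "70dba895e74ad497e9ce7e920215ba59", "B", "33d23cd461454e1da364e0509469a36e");
    System.out.println("archived to " + archived);
  }
}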
2024-12-07T18:20:29,733 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/33d23cd461454e1da364e0509469a36e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/33d23cd461454e1da364e0509469a36e 2024-12-07T18:20:29,734 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/e0ea61c624ab47bc89a14a07c552c0bd to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/e0ea61c624ab47bc89a14a07c552c0bd 2024-12-07T18:20:29,735 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/c2390ceb6a4b49118dd9fb2e390616cc to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/c2390ceb6a4b49118dd9fb2e390616cc 2024-12-07T18:20:29,736 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ecfe9740d843440b87037f42be098100 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ecfe9740d843440b87037f42be098100 2024-12-07T18:20:29,736 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/0c75e8a4b05040908ea4c9b59b5c4f2a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/0c75e8a4b05040908ea4c9b59b5c4f2a 2024-12-07T18:20:29,738 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/4525ee287e174411820e04eb62145c9c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/4525ee287e174411820e04eb62145c9c 2024-12-07T18:20:29,738 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/cc75f6c5ddad4533a224afb9b8ad43b1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/cc75f6c5ddad4533a224afb9b8ad43b1 2024-12-07T18:20:29,739 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d0475d48e9844f70977650acb9e292d3 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d0475d48e9844f70977650acb9e292d3 2024-12-07T18:20:29,740 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/1a217e2c855a4d13b1535d732d0a5223 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/1a217e2c855a4d13b1535d732d0a5223 2024-12-07T18:20:29,741 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/704c7872beba45e79a5f04288aba6805 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/704c7872beba45e79a5f04288aba6805 2024-12-07T18:20:29,742 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/9d831adf30b14549aac84b9d2359c373 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/9d831adf30b14549aac84b9d2359c373 2024-12-07T18:20:29,743 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/bae62eb3d6ea4198bb2aca169869ae11 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/bae62eb3d6ea4198bb2aca169869ae11 2024-12-07T18:20:29,744 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/4a9eb4de357a4a61b81bf25ce8f942bd to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/4a9eb4de357a4a61b81bf25ce8f942bd 2024-12-07T18:20:29,745 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/61c6e9aa8f6549cebd74e9892c407634 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/61c6e9aa8f6549cebd74e9892c407634 2024-12-07T18:20:29,746 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ad821d71b37742fbb9d9924618342cac to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ad821d71b37742fbb9d9924618342cac 2024-12-07T18:20:29,747 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/55780857107f42cd84cd7e248282e12d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/55780857107f42cd84cd7e248282e12d 2024-12-07T18:20:29,747 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/607391c719f243cd8790e4f9bbce4fd1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/607391c719f243cd8790e4f9bbce4fd1 2024-12-07T18:20:29,748 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7916d06a546e439894cfd022d00ef214 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7916d06a546e439894cfd022d00ef214 2024-12-07T18:20:29,749 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/3903fdf127974fa89ce0fa4a138a166c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/3903fdf127974fa89ce0fa4a138a166c 2024-12-07T18:20:29,750 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/c0481bb10cf849a0bbeefe1aef2a75cd to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/c0481bb10cf849a0bbeefe1aef2a75cd 2024-12-07T18:20:29,751 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/1b362d7ad63a44008b364ce774589249 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/1b362d7ad63a44008b364ce774589249 2024-12-07T18:20:29,752 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d34784bca9a149038fbb44ac2c9fd4ef to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d34784bca9a149038fbb44ac2c9fd4ef 2024-12-07T18:20:29,752 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/fa283a802e854034a76f0949203cb071 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/fa283a802e854034a76f0949203cb071 2024-12-07T18:20:29,753 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b529c82d12dd44298e87b580c9910964 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b529c82d12dd44298e87b580c9910964 2024-12-07T18:20:29,754 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/bb37fd7335504d0fa4d45f84c27b025d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/bb37fd7335504d0fa4d45f84c27b025d 2024-12-07T18:20:29,755 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/98a87a547c98446ea8198b494d913357 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/98a87a547c98446ea8198b494d913357 2024-12-07T18:20:29,756 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/0bb29f7a52e64a238b652c10afcd61fe to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/0bb29f7a52e64a238b652c10afcd61fe 2024-12-07T18:20:29,756 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7335f8b739b140e2afdd29ab87cd7b26 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/7335f8b739b140e2afdd29ab87cd7b26 2024-12-07T18:20:29,757 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b82cbc8d4d9e4aeabcb053e89f42c435 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/b82cbc8d4d9e4aeabcb053e89f42c435 2024-12-07T18:20:29,758 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/5c08f260a4794e7994858aae3dfab6ed to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/5c08f260a4794e7994858aae3dfab6ed 2024-12-07T18:20:29,759 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2fb3fd47ce8e46f5bccf8d9c7c08639d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2d7d824dfa4a45f09b084d173d3ad538, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/a74aa682a1ac450ebc0cc000343316f5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/a4abeeb75aec41caa8da36da362e973b, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/b48632588e334f25adaea572c680e832, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/b101a36f4702442abb779fcdce755430, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c0337cd6b40a4582a94365ed325f15db, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/951bc439556a4aaa89fdc9da70a3d9fa, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/7d625ef4bbb040e0861b9a7dbd221d8e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cf4c6db6c7a44569b9bc2e78a2e2d1b4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/759583fffe8c4eb2ad14c91ba9a5e291, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/85a6cc301390439e8d9be4df4268d60c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/d2e283ae63fc443baffd0ebbd66cb436, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af4a62d374c0493db3e223795beb561f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/689cf8b505f34858a084c3bb7faecc6f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/34e544f1a04a43589b07ec174db6d33e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af7acae620e84eb5a421e83a273bd573, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/946fcbde61f8413fa791db8db39c6f8c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c2ab4aa4874f4134a0fee9fae993522d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/ebdbe5f431d349beb63314eca96ba9b9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/7470fad689ce4dffbab0fc81a1e89a03, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/219931b028344ed5ba645b5032909f06, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/8e4f2052a0a340b19b71d28c02923cc2, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/5d7eef0cb98b47c0bbe537f3ef0ef156, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/38429bcb4b314d528c012fd18b4a3114, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/9bf859b84afe4da6badfbc2f778d990d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/3083ef34cde240868e7ea23e42fd3032, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/59b442e31ce64864952b908ec2c1471e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/0c05722e95fc4dc9bb426727a136902d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cc6b35186dd0469aaedc60720fd10e6b] to archive 2024-12-07T18:20:29,760 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:20:29,761 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2fb3fd47ce8e46f5bccf8d9c7c08639d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2fb3fd47ce8e46f5bccf8d9c7c08639d 2024-12-07T18:20:29,762 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2d7d824dfa4a45f09b084d173d3ad538 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/2d7d824dfa4a45f09b084d173d3ad538 2024-12-07T18:20:29,763 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/a74aa682a1ac450ebc0cc000343316f5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/a74aa682a1ac450ebc0cc000343316f5 2024-12-07T18:20:29,764 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/a4abeeb75aec41caa8da36da362e973b to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/a4abeeb75aec41caa8da36da362e973b 2024-12-07T18:20:29,765 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/b48632588e334f25adaea572c680e832 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/b48632588e334f25adaea572c680e832 2024-12-07T18:20:29,765 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/b101a36f4702442abb779fcdce755430 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/b101a36f4702442abb779fcdce755430 2024-12-07T18:20:29,766 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c0337cd6b40a4582a94365ed325f15db to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c0337cd6b40a4582a94365ed325f15db 2024-12-07T18:20:29,767 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/951bc439556a4aaa89fdc9da70a3d9fa to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/951bc439556a4aaa89fdc9da70a3d9fa 2024-12-07T18:20:29,768 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/7d625ef4bbb040e0861b9a7dbd221d8e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/7d625ef4bbb040e0861b9a7dbd221d8e 2024-12-07T18:20:29,769 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cf4c6db6c7a44569b9bc2e78a2e2d1b4 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cf4c6db6c7a44569b9bc2e78a2e2d1b4 2024-12-07T18:20:29,770 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/759583fffe8c4eb2ad14c91ba9a5e291 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/759583fffe8c4eb2ad14c91ba9a5e291 2024-12-07T18:20:29,771 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/85a6cc301390439e8d9be4df4268d60c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/85a6cc301390439e8d9be4df4268d60c 2024-12-07T18:20:29,772 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/d2e283ae63fc443baffd0ebbd66cb436 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/d2e283ae63fc443baffd0ebbd66cb436 2024-12-07T18:20:29,773 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af4a62d374c0493db3e223795beb561f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af4a62d374c0493db3e223795beb561f 2024-12-07T18:20:29,774 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/689cf8b505f34858a084c3bb7faecc6f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/689cf8b505f34858a084c3bb7faecc6f 2024-12-07T18:20:29,774 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/34e544f1a04a43589b07ec174db6d33e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/34e544f1a04a43589b07ec174db6d33e 2024-12-07T18:20:29,775 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af7acae620e84eb5a421e83a273bd573 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/af7acae620e84eb5a421e83a273bd573 2024-12-07T18:20:29,776 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/946fcbde61f8413fa791db8db39c6f8c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/946fcbde61f8413fa791db8db39c6f8c 2024-12-07T18:20:29,777 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c2ab4aa4874f4134a0fee9fae993522d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/c2ab4aa4874f4134a0fee9fae993522d 2024-12-07T18:20:29,777 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/ebdbe5f431d349beb63314eca96ba9b9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/ebdbe5f431d349beb63314eca96ba9b9 2024-12-07T18:20:29,778 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/7470fad689ce4dffbab0fc81a1e89a03 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/7470fad689ce4dffbab0fc81a1e89a03 2024-12-07T18:20:29,780 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/219931b028344ed5ba645b5032909f06 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/219931b028344ed5ba645b5032909f06 2024-12-07T18:20:29,780 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/8e4f2052a0a340b19b71d28c02923cc2 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/8e4f2052a0a340b19b71d28c02923cc2 2024-12-07T18:20:29,781 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/5d7eef0cb98b47c0bbe537f3ef0ef156 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/5d7eef0cb98b47c0bbe537f3ef0ef156 2024-12-07T18:20:29,782 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/38429bcb4b314d528c012fd18b4a3114 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/38429bcb4b314d528c012fd18b4a3114 2024-12-07T18:20:29,783 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/9bf859b84afe4da6badfbc2f778d990d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/9bf859b84afe4da6badfbc2f778d990d 2024-12-07T18:20:29,784 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/3083ef34cde240868e7ea23e42fd3032 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/3083ef34cde240868e7ea23e42fd3032 2024-12-07T18:20:29,784 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/59b442e31ce64864952b908ec2c1471e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/59b442e31ce64864952b908ec2c1471e 2024-12-07T18:20:29,785 DEBUG [StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/0c05722e95fc4dc9bb426727a136902d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/0c05722e95fc4dc9bb426727a136902d 2024-12-07T18:20:29,786 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cc6b35186dd0469aaedc60720fd10e6b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/cc6b35186dd0469aaedc60720fd10e6b 2024-12-07T18:20:29,789 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/recovered.edits/502.seqid, newMaxSeqId=502, maxSeqId=1 2024-12-07T18:20:29,790 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59. 2024-12-07T18:20:29,790 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1635): Region close journal for 70dba895e74ad497e9ce7e920215ba59: 2024-12-07T18:20:29,791 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(170): Closed 70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:29,792 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=70dba895e74ad497e9ce7e920215ba59, regionState=CLOSED 2024-12-07T18:20:29,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-12-07T18:20:29,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; CloseRegionProcedure 70dba895e74ad497e9ce7e920215ba59, server=8a7a030b35db,45237,1733595542335 in 2.1770 sec 2024-12-07T18:20:29,795 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-07T18:20:29,795 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=70dba895e74ad497e9ce7e920215ba59, UNASSIGN in 2.1810 sec 2024-12-07T18:20:29,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-07T18:20:29,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.1830 sec 2024-12-07T18:20:29,797 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595629797"}]},"ts":"1733595629797"} 2024-12-07T18:20:29,798 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-07T18:20:29,800 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-07T18:20:29,801 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.1950 sec 2024-12-07T18:20:30,639 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): 
NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T18:20:31,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-07T18:20:31,713 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-12-07T18:20:31,713 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-07T18:20:31,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:31,715 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=99, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:31,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-07T18:20:31,715 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=99, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:31,716 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:31,718 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/recovered.edits] 2024-12-07T18:20:31,721 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/30ff459099634d36a257cce6b75be197 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/30ff459099634d36a257cce6b75be197 2024-12-07T18:20:31,722 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/4f682eab218b43b192826bb41221641c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/4f682eab218b43b192826bb41221641c 2024-12-07T18:20:31,723 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/6bde1e6c7897471f807d8c8a9e678ba2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/6bde1e6c7897471f807d8c8a9e678ba2 2024-12-07T18:20:31,724 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/83396209dfba4b16926636761fafaf31 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/83396209dfba4b16926636761fafaf31 2024-12-07T18:20:31,725 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/dbcbfb120851411c8d43c733583eda5f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/A/dbcbfb120851411c8d43c733583eda5f 2024-12-07T18:20:31,726 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/362eda73e85a401882500bffdb29a15a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/362eda73e85a401882500bffdb29a15a 2024-12-07T18:20:31,727 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ba68295da6e04510bcc85f638d44537c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ba68295da6e04510bcc85f638d44537c 2024-12-07T18:20:31,728 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/cbc4c8ab68474d1d893864a9bd2ba893 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/cbc4c8ab68474d1d893864a9bd2ba893 2024-12-07T18:20:31,729 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d6e71f807f9e415db093463d930fa3e1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/d6e71f807f9e415db093463d930fa3e1 2024-12-07T18:20:31,730 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ea8af9adb4ec4a10816ca722c41530be to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/B/ea8af9adb4ec4a10816ca722c41530be 2024-12-07T18:20:31,732 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/02d742d5136349f6ab7c05a7ff22980d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/02d742d5136349f6ab7c05a7ff22980d 2024-12-07T18:20:31,733 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/459bd0c7dbe54f2fa88d70a7cd6a1da9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/459bd0c7dbe54f2fa88d70a7cd6a1da9 2024-12-07T18:20:31,734 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/4cee877893b641ed92740a94863b02c7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/4cee877893b641ed92740a94863b02c7 2024-12-07T18:20:31,735 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/6289d5ced001443eb11d928841ceb270 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/6289d5ced001443eb11d928841ceb270 2024-12-07T18:20:31,736 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/d1e81061b0694e089d7df7a07914594e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/C/d1e81061b0694e089d7df7a07914594e 2024-12-07T18:20:31,738 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/recovered.edits/502.seqid to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59/recovered.edits/502.seqid 2024-12-07T18:20:31,739 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/70dba895e74ad497e9ce7e920215ba59 2024-12-07T18:20:31,739 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-07T18:20:31,740 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=99, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:31,744 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-07T18:20:31,745 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-07T18:20:31,746 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=99, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:31,746 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-07T18:20:31,746 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733595631746"}]},"ts":"9223372036854775807"} 2024-12-07T18:20:31,748 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-07T18:20:31,748 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 70dba895e74ad497e9ce7e920215ba59, NAME => 'TestAcidGuarantees,,1733595605053.70dba895e74ad497e9ce7e920215ba59.', STARTKEY => '', ENDKEY => ''}] 2024-12-07T18:20:31,748 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-07T18:20:31,748 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733595631748"}]},"ts":"9223372036854775807"} 2024-12-07T18:20:31,749 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-07T18:20:31,752 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=99, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:31,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 39 msec 2024-12-07T18:20:31,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-07T18:20:31,816 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 99 completed 2024-12-07T18:20:31,826 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=239 (was 238) - Thread LEAK? -, OpenFileDescriptor=453 (was 455), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=500 (was 481) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7005 (was 7070) 2024-12-07T18:20:31,836 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=239, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=500, ProcessCount=11, AvailableMemoryMB=7004 2024-12-07T18:20:31,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-07T18:20:31,838 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T18:20:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:31,839 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T18:20:31,839 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:31,839 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 100 2024-12-07T18:20:31,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-07T18:20:31,840 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T18:20:31,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742207_1383 (size=963) 2024-12-07T18:20:31,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-07T18:20:32,055 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-07T18:20:32,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-07T18:20:32,246 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7 2024-12-07T18:20:32,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742208_1384 (size=53) 2024-12-07T18:20:32,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-07T18:20:32,652 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:20:32,652 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 81dd0984f21c5170abf1b07080819b3d, disabling compactions & flushes 2024-12-07T18:20:32,652 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:32,652 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:32,652 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. after waiting 0 ms 2024-12-07T18:20:32,652 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
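For reference, the table descriptor echoed in the create-table request above (ADAPTIVE in-memory compaction, families A, B and C, each keeping a single version) could be assembled with the HBase 2.x client API roughly as follows. This is an illustrative sketch, not the test's own setup code; the class name and connection handling are assumptions.

    // Illustrative sketch only: build and create a table equivalent to the
    // descriptor logged above, using the public HBase 2.x client API.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder tdb = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // table-level metadata seen in the log: in-memory compaction policy
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] {"A", "B", "C"}) {
            tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
                .build());
          }
          admin.createTable(tdb.build());
        }
      }
    }
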
2024-12-07T18:20:32,652 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:32,652 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:32,653 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T18:20:32,653 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733595632653"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733595632653"}]},"ts":"1733595632653"} 2024-12-07T18:20:32,654 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-07T18:20:32,655 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T18:20:32,655 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595632655"}]},"ts":"1733595632655"} 2024-12-07T18:20:32,656 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-07T18:20:32,659 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=81dd0984f21c5170abf1b07080819b3d, ASSIGN}] 2024-12-07T18:20:32,660 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=81dd0984f21c5170abf1b07080819b3d, ASSIGN 2024-12-07T18:20:32,661 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=81dd0984f21c5170abf1b07080819b3d, ASSIGN; state=OFFLINE, location=8a7a030b35db,45237,1733595542335; forceNewPlan=false, retain=false 2024-12-07T18:20:32,811 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=81dd0984f21c5170abf1b07080819b3d, regionState=OPENING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:32,812 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; OpenRegionProcedure 81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:20:32,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-07T18:20:32,964 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:32,967 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open 
TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:32,967 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:20:32,968 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:32,968 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:20:32,968 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:32,968 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:32,969 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:32,970 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:20:32,970 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 81dd0984f21c5170abf1b07080819b3d columnFamilyName A 2024-12-07T18:20:32,970 DEBUG [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:32,971 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.HStore(327): Store=81dd0984f21c5170abf1b07080819b3d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:20:32,971 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:32,971 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:20:32,972 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 81dd0984f21c5170abf1b07080819b3d columnFamilyName B 2024-12-07T18:20:32,972 DEBUG [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:32,972 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.HStore(327): Store=81dd0984f21c5170abf1b07080819b3d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:20:32,972 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:32,973 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:20:32,973 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 81dd0984f21c5170abf1b07080819b3d columnFamilyName C 2024-12-07T18:20:32,973 DEBUG [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:32,973 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] 
regionserver.HStore(327): Store=81dd0984f21c5170abf1b07080819b3d/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:20:32,973 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:32,974 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:32,974 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:32,975 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T18:20:32,977 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:32,978 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T18:20:32,979 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened 81dd0984f21c5170abf1b07080819b3d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69463750, jitterRate=0.03509053587913513}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T18:20:32,979 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:32,980 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., pid=102, masterSystemTime=1733595632964 2024-12-07T18:20:32,981 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:32,981 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
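The CompactionConfiguration lines above list the effective store settings for each family (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0), and the earlier TableDescriptorChecker warning flags the 131072-byte memstore flush size. A hedged sketch of how such values map onto the usual hbase-site.xml keys follows; the key-to-value mapping is an interpretation of the log, not configuration extracted from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Very small flush size, matching the 131072-byte value TableDescriptorChecker warns about.
    conf.setLong("hbase.hregion.memstore.flush.size", 131072L);
    // Standard compaction knobs corresponding to the CompactionConfiguration line above:
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    return conf;
  }
}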
2024-12-07T18:20:32,981 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=81dd0984f21c5170abf1b07080819b3d, regionState=OPEN, openSeqNum=2, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:32,983 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-07T18:20:32,983 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; OpenRegionProcedure 81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 in 170 msec 2024-12-07T18:20:32,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-07T18:20:32,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=81dd0984f21c5170abf1b07080819b3d, ASSIGN in 324 msec 2024-12-07T18:20:32,985 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T18:20:32,985 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595632985"}]},"ts":"1733595632985"} 2024-12-07T18:20:32,986 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-07T18:20:32,988 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T18:20:32,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1500 sec 2024-12-07T18:20:33,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-07T18:20:33,943 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-12-07T18:20:33,945 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ecf33fc to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47290c4 2024-12-07T18:20:33,948 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7267b857, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:33,949 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:33,951 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40224, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:33,951 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T18:20:33,952 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54752, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T18:20:33,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-07T18:20:33,954 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T18:20:33,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=103, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-07T18:20:33,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742209_1385 (size=999) 2024-12-07T18:20:34,364 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-07T18:20:34,364 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-07T18:20:34,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-07T18:20:34,369 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=81dd0984f21c5170abf1b07080819b3d, REOPEN/MOVE}] 2024-12-07T18:20:34,369 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=81dd0984f21c5170abf1b07080819b3d, REOPEN/MOVE 2024-12-07T18:20:34,370 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=81dd0984f21c5170abf1b07080819b3d, regionState=CLOSING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:34,371 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T18:20:34,371 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE; CloseRegionProcedure 81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:20:34,522 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:34,522 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(124): Close 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,522 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T18:20:34,522 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1681): Closing 81dd0984f21c5170abf1b07080819b3d, disabling compactions & flushes 2024-12-07T18:20:34,522 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:34,522 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:34,522 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. after waiting 0 ms 2024-12-07T18:20:34,522 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
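At 18:20:33,954 the client modifies the table so that family 'A' becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), which the master applies via ModifyTableProcedure pid=103 followed by the region reopen logged above. A minimal sketch of the equivalent Admin call is shown below; it assumes the standard 2.x descriptor-builder API and is not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(table);
      ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(4L)   // MOB_THRESHOLD => '4': cells larger than 4 bytes go to MOB files
          .build();
      TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(mobA)
          .build();
      // modifyTable triggers a ModifyTableProcedure and a region reopen, as seen in the log.
      admin.modifyTable(updated);
    }
  }
}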
2024-12-07T18:20:34,526 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-07T18:20:34,526 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:34,526 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1635): Region close journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:34,526 WARN [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegionServer(3786): Not adding moved region record: 81dd0984f21c5170abf1b07080819b3d to self. 2024-12-07T18:20:34,527 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(170): Closed 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,528 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=81dd0984f21c5170abf1b07080819b3d, regionState=CLOSED 2024-12-07T18:20:34,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=105 2024-12-07T18:20:34,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=105, state=SUCCESS; CloseRegionProcedure 81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 in 157 msec 2024-12-07T18:20:34,530 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=81dd0984f21c5170abf1b07080819b3d, REOPEN/MOVE; state=CLOSED, location=8a7a030b35db,45237,1733595542335; forceNewPlan=false, retain=true 2024-12-07T18:20:34,680 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=81dd0984f21c5170abf1b07080819b3d, regionState=OPENING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:34,681 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=105, state=RUNNABLE; OpenRegionProcedure 81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:20:34,832 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:34,835 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
2024-12-07T18:20:34,836 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7285): Opening region: {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:20:34,836 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,836 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:20:34,836 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7327): checking encryption for 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,836 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7330): checking classloading for 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,837 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,838 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:20:34,839 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 81dd0984f21c5170abf1b07080819b3d columnFamilyName A 2024-12-07T18:20:34,840 DEBUG [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:34,840 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.HStore(327): Store=81dd0984f21c5170abf1b07080819b3d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:20:34,841 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,841 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:20:34,841 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 81dd0984f21c5170abf1b07080819b3d columnFamilyName B 2024-12-07T18:20:34,841 DEBUG [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:34,842 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.HStore(327): Store=81dd0984f21c5170abf1b07080819b3d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:20:34,842 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,842 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:20:34,842 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 81dd0984f21c5170abf1b07080819b3d columnFamilyName C 2024-12-07T18:20:34,842 DEBUG [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:34,843 INFO [StoreOpener-81dd0984f21c5170abf1b07080819b3d-1 {}] regionserver.HStore(327): Store=81dd0984f21c5170abf1b07080819b3d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:20:34,843 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:34,844 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,844 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,846 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T18:20:34,847 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1085): writing seq id for 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,847 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1102): Opened 81dd0984f21c5170abf1b07080819b3d; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68263217, jitterRate=0.017201200127601624}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T18:20:34,848 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1001): Region open journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:34,849 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., pid=107, masterSystemTime=1733595634832 2024-12-07T18:20:34,850 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:34,850 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
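With the region reopened under the MOB-enabled descriptor (next sequenceid=5), the test opens a batch of client connections and starts its writer threads. A simplified sketch of one such write follows: a single-row Put spanning families A, B and C, using the test_row_0/col10 naming that appears in the flush output further down. The value size and the overall workload shape are assumptions, not the test's actual writer.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SimpleWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One atomic row mutation touching all three families.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      byte[] value = Bytes.toBytes("vvvvvvvvvv"); // 10 bytes: above family A's 4-byte MOB threshold
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
      table.put(put); // a single-row Put is applied atomically across column families
    }
  }
}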
2024-12-07T18:20:34,850 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=81dd0984f21c5170abf1b07080819b3d, regionState=OPEN, openSeqNum=5, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:34,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=105 2024-12-07T18:20:34,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=105, state=SUCCESS; OpenRegionProcedure 81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 in 170 msec 2024-12-07T18:20:34,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-07T18:20:34,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=81dd0984f21c5170abf1b07080819b3d, REOPEN/MOVE in 483 msec 2024-12-07T18:20:34,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-12-07T18:20:34,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 487 msec 2024-12-07T18:20:34,856 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 901 msec 2024-12-07T18:20:34,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-12-07T18:20:34,858 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c8cc27b to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c68919e 2024-12-07T18:20:34,865 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63822144, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:34,865 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x081cac4f to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@601038b3 2024-12-07T18:20:34,868 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@126abdf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:34,869 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64a04d7a to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59434fd 2024-12-07T18:20:34,872 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42d6bca6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:34,873 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3268230a 
to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167fda66 2024-12-07T18:20:34,876 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61bb7783, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:34,876 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d7912a0 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bd5983 2024-12-07T18:20:34,879 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f0031d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:34,880 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b7324d5 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5434c92 2024-12-07T18:20:34,883 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53c186a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:34,883 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d930fb1 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@52abed4d 2024-12-07T18:20:34,886 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d80c576, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:34,887 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x114e6211 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c0234f0 2024-12-07T18:20:34,890 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17a2e973, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:34,891 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x191ae36a to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14b2e10d 2024-12-07T18:20:34,894 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@145b6b99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:34,895 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x133cc1f0 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1085e013 2024-12-07T18:20:34,898 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fcd5639, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:20:34,901 DEBUG [hconnection-0x5856ef85-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:34,901 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:34,902 DEBUG [hconnection-0xf3f5be3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:34,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-07T18:20:34,902 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40232, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:34,902 DEBUG [hconnection-0x36c46e68-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:34,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-07T18:20:34,903 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:34,902 DEBUG [hconnection-0x2ee36745-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:34,903 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:34,903 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:34,903 DEBUG [hconnection-0x4e27e0e8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:34,904 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:34,904 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:34,904 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40258, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:34,904 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:34,905 DEBUG [hconnection-0x606c5dbb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:34,905 DEBUG [hconnection-0x7186f4d7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:34,905 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40272, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:34,906 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:34,908 DEBUG [hconnection-0x521d17df-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:34,909 DEBUG [hconnection-0x1f83f6b8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:34,910 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40310, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:34,910 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40294, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:34,911 DEBUG [hconnection-0x1661fba3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:20:34,912 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40322, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:20:34,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:34,914 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:20:34,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:34,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:34,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:34,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:34,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:34,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:34,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207813663ac95ae4fb49118d76637dd88ba_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595634913/Put/seqid=0 2024-12-07T18:20:34,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742210_1386 (size=12154) 2024-12-07T18:20:34,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:34,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595694987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:34,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:34,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595694988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:34,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:34,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595694988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:34,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:34,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595694988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:34,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:34,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595694989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-07T18:20:35,058 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-07T18:20:35,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:35,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:35,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595695090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595695095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595695095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595695095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595695095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-07T18:20:35,211 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-07T18:20:35,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:35,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:35,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595695294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595695297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595695297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595695298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595695298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,310 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T18:20:35,341 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:35,346 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207813663ac95ae4fb49118d76637dd88ba_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207813663ac95ae4fb49118d76637dd88ba_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:35,346 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/cefb51ff6e58465a8a466042ccce5094, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:35,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/cefb51ff6e58465a8a466042ccce5094 is 175, key is test_row_0/A:col10/1733595634913/Put/seqid=0 2024-12-07T18:20:35,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742211_1387 (size=30955) 2024-12-07T18:20:35,362 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/cefb51ff6e58465a8a466042ccce5094 2024-12-07T18:20:35,364 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-07T18:20:35,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:35,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,365 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:35,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:35,389 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/5df10479964d4daebdf4eb799bd42108 is 50, key is test_row_0/B:col10/1733595634913/Put/seqid=0 2024-12-07T18:20:35,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742212_1388 (size=12001) 2024-12-07T18:20:35,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-07T18:20:35,517 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-07T18:20:35,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:35,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,518 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595695597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595695603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595695603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595695603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:35,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595695604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,670 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-07T18:20:35,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:35,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:35,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/5df10479964d4daebdf4eb799bd42108 2024-12-07T18:20:35,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/5bae64496d0b4e8ba52e1ea688c4c0b2 is 50, key is test_row_0/C:col10/1733595634913/Put/seqid=0 2024-12-07T18:20:35,823 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,824 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-07T18:20:35,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:35,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:35,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:35,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742213_1389 (size=12001) 2024-12-07T18:20:35,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/5bae64496d0b4e8ba52e1ea688c4c0b2 2024-12-07T18:20:35,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/cefb51ff6e58465a8a466042ccce5094 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/cefb51ff6e58465a8a466042ccce5094 2024-12-07T18:20:35,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/cefb51ff6e58465a8a466042ccce5094, entries=150, sequenceid=15, filesize=30.2 K 2024-12-07T18:20:35,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/5df10479964d4daebdf4eb799bd42108 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/5df10479964d4daebdf4eb799bd42108 2024-12-07T18:20:35,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/5df10479964d4daebdf4eb799bd42108, entries=150, sequenceid=15, filesize=11.7 K 2024-12-07T18:20:35,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/5bae64496d0b4e8ba52e1ea688c4c0b2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5bae64496d0b4e8ba52e1ea688c4c0b2 2024-12-07T18:20:35,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5bae64496d0b4e8ba52e1ea688c4c0b2, entries=150, sequenceid=15, filesize=11.7 K 2024-12-07T18:20:35,858 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 81dd0984f21c5170abf1b07080819b3d in 944ms, sequenceid=15, compaction requested=false 2024-12-07T18:20:35,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:35,976 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:35,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-07T18:20:35,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:35,977 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-07T18:20:35,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:35,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:35,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:35,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:35,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:35,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:35,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207435c2a4b3d9743b290da8eeeedb4167e_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595634985/Put/seqid=0 2024-12-07T18:20:35,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742214_1390 (size=12154) 2024-12-07T18:20:36,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-07T18:20:36,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:36,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:36,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595696113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595696113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595696115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595696115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595696116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595696220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595696220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595696220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595696220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595696220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:36,394 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207435c2a4b3d9743b290da8eeeedb4167e_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207435c2a4b3d9743b290da8eeeedb4167e_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:36,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/2f77bc50c3d6455fb3c8125fa8927973, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:36,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/2f77bc50c3d6455fb3c8125fa8927973 is 175, key is test_row_0/A:col10/1733595634985/Put/seqid=0 2024-12-07T18:20:36,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742215_1391 (size=30955) 2024-12-07T18:20:36,401 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/2f77bc50c3d6455fb3c8125fa8927973 2024-12-07T18:20:36,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/d4d398f92d764cbeb21bc7f80e99c6bc is 50, key is test_row_0/B:col10/1733595634985/Put/seqid=0 2024-12-07T18:20:36,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742216_1392 (size=12001) 2024-12-07T18:20:36,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595696424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595696424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595696426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595696426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595696427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595696728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595696729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595696730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595696732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:36,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595696732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:36,822 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/d4d398f92d764cbeb21bc7f80e99c6bc 2024-12-07T18:20:36,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/20566051e8694c469575d562c7af2eae is 50, key is test_row_0/C:col10/1733595634985/Put/seqid=0 2024-12-07T18:20:36,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742217_1393 (size=12001) 2024-12-07T18:20:37,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-07T18:20:37,234 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/20566051e8694c469575d562c7af2eae 2024-12-07T18:20:37,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:37,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595697232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:37,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/2f77bc50c3d6455fb3c8125fa8927973 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/2f77bc50c3d6455fb3c8125fa8927973 2024-12-07T18:20:37,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:37,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595697235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:37,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:37,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595697239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:37,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:37,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595697240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:37,242 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/2f77bc50c3d6455fb3c8125fa8927973, entries=150, sequenceid=40, filesize=30.2 K 2024-12-07T18:20:37,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/d4d398f92d764cbeb21bc7f80e99c6bc as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d4d398f92d764cbeb21bc7f80e99c6bc 2024-12-07T18:20:37,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:37,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595697242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:37,246 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d4d398f92d764cbeb21bc7f80e99c6bc, entries=150, sequenceid=40, filesize=11.7 K 2024-12-07T18:20:37,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/20566051e8694c469575d562c7af2eae as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/20566051e8694c469575d562c7af2eae 2024-12-07T18:20:37,250 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/20566051e8694c469575d562c7af2eae, entries=150, sequenceid=40, filesize=11.7 K 2024-12-07T18:20:37,250 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 81dd0984f21c5170abf1b07080819b3d in 1273ms, sequenceid=40, compaction requested=false 2024-12-07T18:20:37,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:37,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
2024-12-07T18:20:37,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-07T18:20:37,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-07T18:20:37,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-07T18:20:37,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3480 sec 2024-12-07T18:20:37,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.3530 sec 2024-12-07T18:20:38,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:38,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-07T18:20:38,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:38,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:38,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:38,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:38,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:38,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:38,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412077860c7e8d25a49cfb8859328e2812ac1_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595636115/Put/seqid=0 2024-12-07T18:20:38,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742218_1394 (size=14594) 2024-12-07T18:20:38,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595698269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595698271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595698272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595698274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595698274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595698375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595698379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595698380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595698380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595698381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595698580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595698586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595698587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595698588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595698588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,655 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:38,659 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412077860c7e8d25a49cfb8859328e2812ac1_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412077860c7e8d25a49cfb8859328e2812ac1_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:38,660 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/238c907bb2c84e3bb42f157e39169008, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:38,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/238c907bb2c84e3bb42f157e39169008 is 175, key is test_row_0/A:col10/1733595636115/Put/seqid=0 2024-12-07T18:20:38,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742219_1395 (size=39549) 2024-12-07T18:20:38,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595698891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595698891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595698891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595698892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:38,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:38,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595698893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:39,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-07T18:20:39,007 INFO [Thread-1728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-07T18:20:39,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-07T18:20:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T18:20:39,010 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:39,011 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:39,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:39,066 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/238c907bb2c84e3bb42f157e39169008 2024-12-07T18:20:39,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/236abbbb534b4132b2a02de78bc4a1ab is 50, key is test_row_0/B:col10/1733595636115/Put/seqid=0 2024-12-07T18:20:39,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742220_1396 (size=12001) 
2024-12-07T18:20:39,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T18:20:39,163 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:39,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-07T18:20:39,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:39,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:39,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:39,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:39,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:39,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:39,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T18:20:39,316 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:39,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-07T18:20:39,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
2024-12-07T18:20:39,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:39,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:39,317 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:39,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:39,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:39,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:39,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595699397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:39,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:39,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595699398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:39,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:39,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595699398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:39,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:39,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595699399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:39,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:39,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595699401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:39,469 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:39,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-07T18:20:39,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:39,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:39,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:39,470 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:39,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:39,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:39,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/236abbbb534b4132b2a02de78bc4a1ab 2024-12-07T18:20:39,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/d82a7d694a724384b961564ab66dd31e is 50, key is test_row_0/C:col10/1733595636115/Put/seqid=0 2024-12-07T18:20:39,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742221_1397 (size=12001) 2024-12-07T18:20:39,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/d82a7d694a724384b961564ab66dd31e 2024-12-07T18:20:39,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/238c907bb2c84e3bb42f157e39169008 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/238c907bb2c84e3bb42f157e39169008 2024-12-07T18:20:39,504 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/238c907bb2c84e3bb42f157e39169008, entries=200, sequenceid=52, filesize=38.6 K 2024-12-07T18:20:39,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/236abbbb534b4132b2a02de78bc4a1ab as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/236abbbb534b4132b2a02de78bc4a1ab 2024-12-07T18:20:39,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/236abbbb534b4132b2a02de78bc4a1ab, entries=150, sequenceid=52, filesize=11.7 K 2024-12-07T18:20:39,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/d82a7d694a724384b961564ab66dd31e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d82a7d694a724384b961564ab66dd31e 2024-12-07T18:20:39,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d82a7d694a724384b961564ab66dd31e, entries=150, sequenceid=52, filesize=11.7 K 2024-12-07T18:20:39,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 81dd0984f21c5170abf1b07080819b3d in 1269ms, sequenceid=52, compaction requested=true 2024-12-07T18:20:39,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:39,513 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:39,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:39,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:39,513 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:39,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:39,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:39,514 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:39,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:39,514 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:39,514 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/A is initiating minor compaction (all files) 2024-12-07T18:20:39,514 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/A in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:39,514 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/cefb51ff6e58465a8a466042ccce5094, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/2f77bc50c3d6455fb3c8125fa8927973, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/238c907bb2c84e3bb42f157e39169008] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=99.1 K 2024-12-07T18:20:39,514 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:39,514 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/cefb51ff6e58465a8a466042ccce5094, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/2f77bc50c3d6455fb3c8125fa8927973, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/238c907bb2c84e3bb42f157e39169008] 2024-12-07T18:20:39,515 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:39,515 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/B is initiating minor compaction (all files) 2024-12-07T18:20:39,515 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/B in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:39,515 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/5df10479964d4daebdf4eb799bd42108, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d4d398f92d764cbeb21bc7f80e99c6bc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/236abbbb534b4132b2a02de78bc4a1ab] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=35.2 K 2024-12-07T18:20:39,515 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting cefb51ff6e58465a8a466042ccce5094, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733595634909 2024-12-07T18:20:39,515 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f77bc50c3d6455fb3c8125fa8927973, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733595634985 2024-12-07T18:20:39,515 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 5df10479964d4daebdf4eb799bd42108, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733595634909 2024-12-07T18:20:39,516 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d4d398f92d764cbeb21bc7f80e99c6bc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733595634985 2024-12-07T18:20:39,517 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 238c907bb2c84e3bb42f157e39169008, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733595636113 2024-12-07T18:20:39,518 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 
236abbbb534b4132b2a02de78bc4a1ab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733595636113 2024-12-07T18:20:39,526 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:39,528 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412077b850bc6f24c4c6db793473122d3c297_81dd0984f21c5170abf1b07080819b3d store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:39,529 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#B#compaction#340 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:39,530 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/48ca80d9f7e44dd8b46774ebd20266a9 is 50, key is test_row_0/B:col10/1733595636115/Put/seqid=0 2024-12-07T18:20:39,530 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412077b850bc6f24c4c6db793473122d3c297_81dd0984f21c5170abf1b07080819b3d, store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:39,531 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412077b850bc6f24c4c6db793473122d3c297_81dd0984f21c5170abf1b07080819b3d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:39,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742223_1399 (size=4469) 2024-12-07T18:20:39,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742222_1398 (size=12104) 2024-12-07T18:20:39,561 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/48ca80d9f7e44dd8b46774ebd20266a9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/48ca80d9f7e44dd8b46774ebd20266a9 2024-12-07T18:20:39,567 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/B of 81dd0984f21c5170abf1b07080819b3d into 48ca80d9f7e44dd8b46774ebd20266a9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:39,567 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:39,567 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/B, priority=13, startTime=1733595639513; duration=0sec 2024-12-07T18:20:39,567 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:39,567 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:B 2024-12-07T18:20:39,567 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:39,568 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:39,568 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/C is initiating minor compaction (all files) 2024-12-07T18:20:39,568 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/C in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:39,568 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5bae64496d0b4e8ba52e1ea688c4c0b2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/20566051e8694c469575d562c7af2eae, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d82a7d694a724384b961564ab66dd31e] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=35.2 K 2024-12-07T18:20:39,569 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bae64496d0b4e8ba52e1ea688c4c0b2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733595634909 2024-12-07T18:20:39,569 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 20566051e8694c469575d562c7af2eae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733595634985 2024-12-07T18:20:39,570 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d82a7d694a724384b961564ab66dd31e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733595636113 2024-12-07T18:20:39,579 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
81dd0984f21c5170abf1b07080819b3d#C#compaction#341 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:39,579 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/7d4326cfceed4328a9c29e1c05ec5370 is 50, key is test_row_0/C:col10/1733595636115/Put/seqid=0 2024-12-07T18:20:39,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742224_1400 (size=12104) 2024-12-07T18:20:39,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T18:20:39,622 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:39,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-07T18:20:39,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:39,623 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-07T18:20:39,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:39,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:39,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:39,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:39,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:39,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:39,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412079c0a6e52d5714cfd8073aa697e918273_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595638273/Put/seqid=0 2024-12-07T18:20:39,637 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742225_1401 (size=12154) 2024-12-07T18:20:39,951 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#A#compaction#339 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:39,951 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/659123d91f0c425bbf366fae64196e22 is 175, key is test_row_0/A:col10/1733595636115/Put/seqid=0 2024-12-07T18:20:39,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742226_1402 (size=31058) 2024-12-07T18:20:39,989 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/7d4326cfceed4328a9c29e1c05ec5370 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7d4326cfceed4328a9c29e1c05ec5370 2024-12-07T18:20:39,994 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/C of 81dd0984f21c5170abf1b07080819b3d into 7d4326cfceed4328a9c29e1c05ec5370(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:39,994 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:39,994 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/C, priority=13, startTime=1733595639514; duration=0sec 2024-12-07T18:20:39,994 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:39,994 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:C 2024-12-07T18:20:40,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:40,041 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412079c0a6e52d5714cfd8073aa697e918273_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412079c0a6e52d5714cfd8073aa697e918273_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:40,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/a17023cf8a124c438a7864bbe7de557d, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:40,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/a17023cf8a124c438a7864bbe7de557d is 175, key is test_row_0/A:col10/1733595638273/Put/seqid=0 2024-12-07T18:20:40,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742227_1403 (size=30955) 2024-12-07T18:20:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T18:20:40,338 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T18:20:40,364 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/659123d91f0c425bbf366fae64196e22 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/659123d91f0c425bbf366fae64196e22 2024-12-07T18:20:40,369 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/A of 81dd0984f21c5170abf1b07080819b3d into 659123d91f0c425bbf366fae64196e22(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:40,369 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:40,369 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/A, priority=13, startTime=1733595639513; duration=0sec 2024-12-07T18:20:40,369 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:40,369 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:A 2024-12-07T18:20:40,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:40,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:40,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595700415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595700415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595700416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595700416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595700417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,449 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/a17023cf8a124c438a7864bbe7de557d 2024-12-07T18:20:40,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/27322be48d304ed1a9c15700bfb5d2cf is 50, key is test_row_0/B:col10/1733595638273/Put/seqid=0 2024-12-07T18:20:40,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742228_1404 (size=12001) 2024-12-07T18:20:40,461 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/27322be48d304ed1a9c15700bfb5d2cf 2024-12-07T18:20:40,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/d8fd566254584a30bc300be1643ca16c is 50, key is 
test_row_0/C:col10/1733595638273/Put/seqid=0 2024-12-07T18:20:40,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742229_1405 (size=12001) 2024-12-07T18:20:40,473 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/d8fd566254584a30bc300be1643ca16c 2024-12-07T18:20:40,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/a17023cf8a124c438a7864bbe7de557d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/a17023cf8a124c438a7864bbe7de557d 2024-12-07T18:20:40,482 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/a17023cf8a124c438a7864bbe7de557d, entries=150, sequenceid=77, filesize=30.2 K 2024-12-07T18:20:40,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/27322be48d304ed1a9c15700bfb5d2cf as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/27322be48d304ed1a9c15700bfb5d2cf 2024-12-07T18:20:40,488 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/27322be48d304ed1a9c15700bfb5d2cf, entries=150, sequenceid=77, filesize=11.7 K 2024-12-07T18:20:40,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/d8fd566254584a30bc300be1643ca16c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d8fd566254584a30bc300be1643ca16c 2024-12-07T18:20:40,492 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d8fd566254584a30bc300be1643ca16c, entries=150, sequenceid=77, filesize=11.7 K 2024-12-07T18:20:40,493 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 
{event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 81dd0984f21c5170abf1b07080819b3d in 871ms, sequenceid=77, compaction requested=false 2024-12-07T18:20:40,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:40,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:40,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-07T18:20:40,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-07T18:20:40,495 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-07T18:20:40,495 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4830 sec 2024-12-07T18:20:40,496 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.4870 sec 2024-12-07T18:20:40,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:40,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-07T18:20:40,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:40,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:40,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:40,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:40,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:40,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:40,529 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207d356367899a54f9ebdf44d2a263fa6ad_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595640416/Put/seqid=0 2024-12-07T18:20:40,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742230_1406 (size=14594) 2024-12-07T18:20:40,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595700548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595700549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595700553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595700553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595700655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595700655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595700655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595700659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595700859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595700860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595700860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:40,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595700864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:40,937 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:40,941 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207d356367899a54f9ebdf44d2a263fa6ad_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207d356367899a54f9ebdf44d2a263fa6ad_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:40,942 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/4fd5a7a098524816a7cb3fc00ee1ad86, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:40,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/4fd5a7a098524816a7cb3fc00ee1ad86 is 175, key is test_row_0/A:col10/1733595640416/Put/seqid=0 2024-12-07T18:20:40,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742231_1407 (size=39549) 2024-12-07T18:20:41,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T18:20:41,114 INFO [Thread-1728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-07T18:20:41,115 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:41,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-07T18:20:41,117 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:41,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-07T18:20:41,118 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:41,118 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:41,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:41,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595701164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:41,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595701164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:41,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595701166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:41,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595701170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-07T18:20:41,269 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-07T18:20:41,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:41,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:41,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:41,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:41,349 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/4fd5a7a098524816a7cb3fc00ee1ad86 2024-12-07T18:20:41,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/7ea6f137993b468e9762afe7cf3bcdbc is 50, key is test_row_0/B:col10/1733595640416/Put/seqid=0 2024-12-07T18:20:41,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742232_1408 (size=12001) 2024-12-07T18:20:41,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-07T18:20:41,422 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-07T18:20:41,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:41,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,424 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:41,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:41,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:41,582 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-07T18:20:41,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:41,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:41,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:41,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:41,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:41,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595701668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:41,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595701674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:41,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595701674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:41,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595701678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-07T18:20:41,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-07T18:20:41,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:41,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:41,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:41,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:41,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/7ea6f137993b468e9762afe7cf3bcdbc 2024-12-07T18:20:41,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/477ae2faba7b407a9ceeaf8127abec63 is 50, key is test_row_0/C:col10/1733595640416/Put/seqid=0 2024-12-07T18:20:41,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742233_1409 (size=12001) 2024-12-07T18:20:41,779 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/477ae2faba7b407a9ceeaf8127abec63 2024-12-07T18:20:41,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/4fd5a7a098524816a7cb3fc00ee1ad86 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4fd5a7a098524816a7cb3fc00ee1ad86 2024-12-07T18:20:41,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4fd5a7a098524816a7cb3fc00ee1ad86, entries=200, sequenceid=92, filesize=38.6 K 2024-12-07T18:20:41,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/7ea6f137993b468e9762afe7cf3bcdbc as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/7ea6f137993b468e9762afe7cf3bcdbc 2024-12-07T18:20:41,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/7ea6f137993b468e9762afe7cf3bcdbc, entries=150, sequenceid=92, filesize=11.7 K 2024-12-07T18:20:41,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/477ae2faba7b407a9ceeaf8127abec63 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/477ae2faba7b407a9ceeaf8127abec63 
2024-12-07T18:20:41,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/477ae2faba7b407a9ceeaf8127abec63, entries=150, sequenceid=92, filesize=11.7 K 2024-12-07T18:20:41,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 81dd0984f21c5170abf1b07080819b3d in 1275ms, sequenceid=92, compaction requested=true 2024-12-07T18:20:41,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:41,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:41,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:41,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:41,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:41,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:41,796 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:41,796 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:41,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:41,797 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:41,797 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:41,797 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/B is initiating minor compaction (all files) 2024-12-07T18:20:41,797 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/A is initiating minor compaction (all files) 2024-12-07T18:20:41,797 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/B in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
2024-12-07T18:20:41,797 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/A in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,797 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/48ca80d9f7e44dd8b46774ebd20266a9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/27322be48d304ed1a9c15700bfb5d2cf, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/7ea6f137993b468e9762afe7cf3bcdbc] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=35.3 K 2024-12-07T18:20:41,797 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/659123d91f0c425bbf366fae64196e22, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/a17023cf8a124c438a7864bbe7de557d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4fd5a7a098524816a7cb3fc00ee1ad86] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=99.2 K 2024-12-07T18:20:41,797 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,797 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/659123d91f0c425bbf366fae64196e22, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/a17023cf8a124c438a7864bbe7de557d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4fd5a7a098524816a7cb3fc00ee1ad86] 2024-12-07T18:20:41,797 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 48ca80d9f7e44dd8b46774ebd20266a9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733595636113 2024-12-07T18:20:41,798 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 27322be48d304ed1a9c15700bfb5d2cf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733595638271 2024-12-07T18:20:41,798 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 659123d91f0c425bbf366fae64196e22, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733595636113 2024-12-07T18:20:41,798 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ea6f137993b468e9762afe7cf3bcdbc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733595640415 2024-12-07T18:20:41,798 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting a17023cf8a124c438a7864bbe7de557d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733595638271 2024-12-07T18:20:41,798 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fd5a7a098524816a7cb3fc00ee1ad86, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733595640409 2024-12-07T18:20:41,804 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:41,805 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#B#compaction#348 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:41,805 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/12e735bff11349cd8035a7a19cdecb30 is 50, key is test_row_0/B:col10/1733595640416/Put/seqid=0 2024-12-07T18:20:41,806 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241207ba76c152809c43aab13a6a3aa6891e7b_81dd0984f21c5170abf1b07080819b3d store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:41,808 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241207ba76c152809c43aab13a6a3aa6891e7b_81dd0984f21c5170abf1b07080819b3d, store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:41,808 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207ba76c152809c43aab13a6a3aa6891e7b_81dd0984f21c5170abf1b07080819b3d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:41,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742235_1411 (size=4469) 2024-12-07T18:20:41,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742234_1410 (size=12207) 2024-12-07T18:20:41,819 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/12e735bff11349cd8035a7a19cdecb30 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/12e735bff11349cd8035a7a19cdecb30 2024-12-07T18:20:41,824 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/B of 81dd0984f21c5170abf1b07080819b3d into 12e735bff11349cd8035a7a19cdecb30(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:41,824 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:41,824 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/B, priority=13, startTime=1733595641796; duration=0sec 2024-12-07T18:20:41,824 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:41,824 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:B 2024-12-07T18:20:41,824 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:41,825 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:41,825 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/C is initiating minor compaction (all files) 2024-12-07T18:20:41,825 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/C in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,825 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7d4326cfceed4328a9c29e1c05ec5370, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d8fd566254584a30bc300be1643ca16c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/477ae2faba7b407a9ceeaf8127abec63] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=35.3 K 2024-12-07T18:20:41,825 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d4326cfceed4328a9c29e1c05ec5370, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733595636113 2024-12-07T18:20:41,826 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d8fd566254584a30bc300be1643ca16c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733595638271 2024-12-07T18:20:41,826 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 477ae2faba7b407a9ceeaf8127abec63, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733595640415 2024-12-07T18:20:41,835 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
81dd0984f21c5170abf1b07080819b3d#C#compaction#350 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:41,836 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/fee36b812b6c48b484f0e9cb1ccad99d is 50, key is test_row_0/C:col10/1733595640416/Put/seqid=0 2024-12-07T18:20:41,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742236_1412 (size=12207) 2024-12-07T18:20:41,849 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/fee36b812b6c48b484f0e9cb1ccad99d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/fee36b812b6c48b484f0e9cb1ccad99d 2024-12-07T18:20:41,854 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/C of 81dd0984f21c5170abf1b07080819b3d into fee36b812b6c48b484f0e9cb1ccad99d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:41,854 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:41,854 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/C, priority=13, startTime=1733595641796; duration=0sec 2024-12-07T18:20:41,854 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:41,854 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:C 2024-12-07T18:20:41,887 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:41,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-07T18:20:41,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
2024-12-07T18:20:41,888 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:20:41,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:41,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:41,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:41,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:41,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:41,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:41,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207e010bf8086074bbbbe32843c2bb77c2b_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595640552/Put/seqid=0 2024-12-07T18:20:41,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742237_1413 (size=12154) 2024-12-07T18:20:41,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,908 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207e010bf8086074bbbbe32843c2bb77c2b_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207e010bf8086074bbbbe32843c2bb77c2b_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:41,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/1e44fef0da8440c297960450cdec7d4e, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:41,910 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/1e44fef0da8440c297960450cdec7d4e is 175, key is test_row_0/A:col10/1733595640552/Put/seqid=0 2024-12-07T18:20:41,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742238_1414 (size=30955) 2024-12-07T18:20:41,918 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/1e44fef0da8440c297960450cdec7d4e 2024-12-07T18:20:41,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/8d5078742f94458394e5be86a761a603 is 50, key is test_row_0/B:col10/1733595640552/Put/seqid=0 2024-12-07T18:20:41,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742239_1415 (size=12001) 2024-12-07T18:20:41,933 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/8d5078742f94458394e5be86a761a603 2024-12-07T18:20:41,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/4cc6caa674a843b2936b50b1a973eee5 is 50, key is test_row_0/C:col10/1733595640552/Put/seqid=0 2024-12-07T18:20:41,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742240_1416 (size=12001) 2024-12-07T18:20:41,961 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/4cc6caa674a843b2936b50b1a973eee5 2024-12-07T18:20:41,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/1e44fef0da8440c297960450cdec7d4e as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/1e44fef0da8440c297960450cdec7d4e 2024-12-07T18:20:41,969 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/1e44fef0da8440c297960450cdec7d4e, entries=150, sequenceid=117, filesize=30.2 K 2024-12-07T18:20:41,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/8d5078742f94458394e5be86a761a603 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/8d5078742f94458394e5be86a761a603 2024-12-07T18:20:41,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,974 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/8d5078742f94458394e5be86a761a603, entries=150, sequenceid=117, filesize=11.7 K 2024-12-07T18:20:41,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/4cc6caa674a843b2936b50b1a973eee5 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/4cc6caa674a843b2936b50b1a973eee5 2024-12-07T18:20:41,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,978 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,980 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/4cc6caa674a843b2936b50b1a973eee5, entries=150, sequenceid=117, filesize=11.7 K 2024-12-07T18:20:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,982 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 81dd0984f21c5170abf1b07080819b3d in 94ms, sequenceid=117, compaction requested=false 2024-12-07T18:20:41,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:41,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-07T18:20:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-07T18:20:41,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,985 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-07T18:20:41,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,985 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 865 msec 2024-12-07T18:20:41,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,986 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 870 msec 2024-12-07T18:20:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-07T18:20:42,055 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-07T18:20:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry, "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 45237 from 18:20:42,060 through 18:20:42,119 ...]
2024-12-07T18:20:42,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,214 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#A#compaction#349 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:42,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,214 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/3f2c079484f34d918c6bf260076eada2 is 175, key is test_row_0/A:col10/1733595640416/Put/seqid=0 2024-12-07T18:20:42,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,217 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,220 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742241_1417 (size=31161) 2024-12-07T18:20:42,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-07T18:20:42,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,221 INFO [Thread-1728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-07T18:20:42,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,223 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,223 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:42,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-07T18:20:42,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,225 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:42,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,225 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:42,226 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:42,226 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/3f2c079484f34d918c6bf260076eada2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f2c079484f34d918c6bf260076eada2 2024-12-07T18:20:42,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-07T18:20:42,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,230 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/A of 81dd0984f21c5170abf1b07080819b3d into 3f2c079484f34d918c6bf260076eada2(size=30.4 K), total size for store is 60.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:42,230 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:42,230 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/A, priority=13, startTime=1733595641796; duration=0sec 2024-12-07T18:20:42,230 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:42,230 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:A 2024-12-07T18:20:42,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry — storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker — repeats continuously from RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (queue=0, port=45237) between 2024-12-07T18:20:42,249 and 2024-12-07T18:20:42,307 ...]
2024-12-07T18:20:42,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-07T18:20:42,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker — DEBUG entry emitted continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 45237 from 2024-12-07T18:20:42,343 through 2024-12-07T18:20:42,383, interleaved with the entries below]
2024-12-07T18:20:42,378 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335
2024-12-07T18:20:42,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-12-07T18:20:42,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.
2024-12-07T18:20:42,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d:
2024-12-07T18:20:42,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.
2024-12-07T18:20:42,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115
2024-12-07T18:20:42,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=115
2024-12-07T18:20:42,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114
2024-12-07T18:20:42,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 154 msec
2024-12-07T18:20:42,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 159 msec
[storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker — DEBUG entry continues to be emitted by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 45237 from 2024-12-07T18:20:42,383 through 2024-12-07T18:20:42,399]
2024-12-07T18:20:42,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-07T18:20:42,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,529 INFO [Thread-1728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-07T18:20:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-12-07T18:20:42,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,533 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:42,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-07T18:20:42,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,533 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:42,533 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:42,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
2024-12-07T18:20:42,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,685 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335
2024-12-07T18:20:42,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117
2024-12-07T18:20:42,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.
2024-12-07T18:20:42,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,686 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB
2024-12-07T18:20:42,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A
2024-12-07T18:20:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:20:42,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B
2024-12-07T18:20:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:20:42,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C
2024-12-07T18:20:42,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:20:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120737541f7690fb416596bd01eb15fa1777_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595642571/Put/seqid=0 2024-12-07T18:20:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742242_1418 (size=12154) 2024-12-07T18:20:42,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d
2024-12-07T18:20:42,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing
2024-12-07T18:20:42,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:42,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:42,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:42,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595702746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595702747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:42,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595702748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:42,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595702749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:42,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:42,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595702750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:42,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-07T18:20:42,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595702855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:42,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595702855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:42,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595702855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:42,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595702855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:42,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595702858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595703059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595703060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595703061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595703061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595703063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:43,109 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120737541f7690fb416596bd01eb15fa1777_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120737541f7690fb416596bd01eb15fa1777_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:43,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/3606165ddcdf4dbeafed2c0d2c644426, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:43,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/3606165ddcdf4dbeafed2c0d2c644426 is 175, key is test_row_0/A:col10/1733595642571/Put/seqid=0 2024-12-07T18:20:43,114 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742243_1419 (size=30951) 2024-12-07T18:20:43,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-07T18:20:43,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595703364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595703366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595703367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595703369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595703369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,515 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=126, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/3606165ddcdf4dbeafed2c0d2c644426 2024-12-07T18:20:43,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/2e5ba0836f694bea8766c25ee550b6fc is 50, key is test_row_0/B:col10/1733595642571/Put/seqid=0 2024-12-07T18:20:43,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742244_1420 (size=9657) 2024-12-07T18:20:43,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-07T18:20:43,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595703866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595703874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595703874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595703876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:43,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595703876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:43,925 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/2e5ba0836f694bea8766c25ee550b6fc 2024-12-07T18:20:43,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/7e704e7bb1af433199382905297a87fe is 50, key is test_row_0/C:col10/1733595642571/Put/seqid=0 2024-12-07T18:20:43,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742245_1421 (size=9657) 2024-12-07T18:20:44,336 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/7e704e7bb1af433199382905297a87fe 2024-12-07T18:20:44,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/3606165ddcdf4dbeafed2c0d2c644426 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3606165ddcdf4dbeafed2c0d2c644426
2024-12-07T18:20:44,343 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3606165ddcdf4dbeafed2c0d2c644426, entries=150, sequenceid=126, filesize=30.2 K
2024-12-07T18:20:44,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/2e5ba0836f694bea8766c25ee550b6fc as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/2e5ba0836f694bea8766c25ee550b6fc
2024-12-07T18:20:44,348 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/2e5ba0836f694bea8766c25ee550b6fc, entries=100, sequenceid=126, filesize=9.4 K
2024-12-07T18:20:44,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/7e704e7bb1af433199382905297a87fe as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7e704e7bb1af433199382905297a87fe
2024-12-07T18:20:44,352 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7e704e7bb1af433199382905297a87fe, entries=100, sequenceid=126, filesize=9.4 K
2024-12-07T18:20:44,352 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=181.14 KB/185490 for 81dd0984f21c5170abf1b07080819b3d in 1667ms, sequenceid=126, compaction requested=true
2024-12-07T18:20:44,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d:
2024-12-07T18:20:44,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.
2024-12-07T18:20:44,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117
2024-12-07T18:20:44,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=117
2024-12-07T18:20:44,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116
2024-12-07T18:20:44,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8210 sec
2024-12-07T18:20:44,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.8240 sec
2024-12-07T18:20:44,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
2024-12-07T18:20:44,637 INFO [Thread-1728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed
2024-12-07T18:20:44,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-07T18:20:44,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees
2024-12-07T18:20:44,639 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-07T18:20:44,640 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-07T18:20:44,640 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-07T18:20:44,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-12-07T18:20:44,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-12-07T18:20:44,792 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335
2024-12-07T18:20:44,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119
2024-12-07T18:20:44,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.
2024-12-07T18:20:44,793 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB
2024-12-07T18:20:44,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A
2024-12-07T18:20:44,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:20:44,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B
2024-12-07T18:20:44,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:20:44,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C
2024-12-07T18:20:44,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:20:44,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412071f24c7f0ed3641efa94bc457419088a0_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595642747/Put/seqid=0
2024-12-07T18:20:44,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742246_1422 (size=12304)
2024-12-07T18:20:44,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d
2024-12-07T18:20:44,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing
2024-12-07T18:20:44,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:44,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595704883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:44,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:44,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595704886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:44,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:44,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595704886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:44,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:44,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595704887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:44,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:44,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595704887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:44,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-07T18:20:44,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:44,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595704989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:44,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:44,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595704993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:45,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:45,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595705195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:45,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:45,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595705199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:45,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:45,208 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412071f24c7f0ed3641efa94bc457419088a0_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412071f24c7f0ed3641efa94bc457419088a0_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:45,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/580a22dca33640f19186560d730d2512, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:45,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/580a22dca33640f19186560d730d2512 is 175, key is test_row_0/A:col10/1733595642747/Put/seqid=0 2024-12-07T18:20:45,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742247_1423 (size=31105) 2024-12-07T18:20:45,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-07T18:20:45,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:45,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595705501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:45,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:45,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595705505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:45,615 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=156, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/580a22dca33640f19186560d730d2512 2024-12-07T18:20:45,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/109c528ed9cf4136b9407fee2803d70d is 50, key is test_row_0/B:col10/1733595642747/Put/seqid=0 2024-12-07T18:20:45,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742248_1424 (size=12151) 2024-12-07T18:20:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-07T18:20:46,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:46,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595706008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:46,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:46,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595706015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:46,027 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/109c528ed9cf4136b9407fee2803d70d 2024-12-07T18:20:46,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/9e8ec299c4b4476f90274858e8dda150 is 50, key is test_row_0/C:col10/1733595642747/Put/seqid=0 2024-12-07T18:20:46,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742249_1425 (size=12151) 2024-12-07T18:20:46,438 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/9e8ec299c4b4476f90274858e8dda150 2024-12-07T18:20:46,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/580a22dca33640f19186560d730d2512 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/580a22dca33640f19186560d730d2512 2024-12-07T18:20:46,446 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/580a22dca33640f19186560d730d2512, entries=150, sequenceid=156, filesize=30.4 K 2024-12-07T18:20:46,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/109c528ed9cf4136b9407fee2803d70d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/109c528ed9cf4136b9407fee2803d70d 2024-12-07T18:20:46,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,450 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/109c528ed9cf4136b9407fee2803d70d, entries=150, sequenceid=156, filesize=11.9 K 2024-12-07T18:20:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/9e8ec299c4b4476f90274858e8dda150 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/9e8ec299c4b4476f90274858e8dda150 2024-12-07T18:20:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,455 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/9e8ec299c4b4476f90274858e8dda150, entries=150, sequenceid=156, filesize=11.9 K 2024-12-07T18:20:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,456 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 81dd0984f21c5170abf1b07080819b3d in 1664ms, sequenceid=156, compaction requested=true 2024-12-07T18:20:46,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 
2024-12-07T18:20:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:46,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-07T18:20:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-07T18:20:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,459 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-07T18:20:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,459 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8180 sec 2024-12-07T18:20:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,461 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.8220 sec 2024-12-07T18:20:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,463 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,465 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,467 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,469 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, repeats continuously from RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=45237) between 2024-12-07T18:20:46,470 and 2024-12-07T18:20:46,531 ...]
2024-12-07T18:20:46,531 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,535 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,538 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,542 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,545 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,548 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,550 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,553 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,556 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,559 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,562 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,566 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,568 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,571 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,642 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,646 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,649 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,651 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,654 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,658 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,661 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,664 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,667 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,670 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,672 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,675 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,678 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,681 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,683 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,686 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,689 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,692 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,695 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,699 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,702 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,704 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,707 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,712 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,714 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,717 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,720 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,723 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,728 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,732 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,735 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,738 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,741 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,744 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-12-07T18:20:46,745 INFO [Thread-1728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed
2024-12-07T18:20:46,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,746 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-07T18:20:46,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees
2024-12-07T18:20:46,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120
2024-12-07T18:20:46,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,748 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-07T18:20:46,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,749 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-07T18:20:46,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,749 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-07T18:20:46,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:20:46,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-07T18:20:46,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,901 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:46,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-07T18:20:46,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
2024-12-07T18:20:46,902 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-07T18:20:46,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:46,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:46,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:46,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:46,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:46,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:46,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120798113c4bca084ebaa5f76b865c963d3d_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595644886/Put/seqid=0 2024-12-07T18:20:46,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:46,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:46,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:46,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:46,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:46,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:46,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742250_1426 (size=7324) 2024-12-07T18:20:46,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,935 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,938 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120798113c4bca084ebaa5f76b865c963d3d_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120798113c4bca084ebaa5f76b865c963d3d_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:46,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/3f58737546f54e10b7cdd9a6f9c827f4, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/3f58737546f54e10b7cdd9a6f9c827f4 is 175, key is test_row_0/A:col10/1733595644886/Put/seqid=0 2024-12-07T18:20:46,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,942 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,945 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,948 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,950 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,953 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,956 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,959 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742251_1427 (size=13815) 2024-12-07T18:20:46,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:46,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:46,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:46,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:47,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:47,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:47,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:20:47,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:47,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-07T18:20:47,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595707048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595707049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595707055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595707055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595707056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595707162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595707162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595707166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595707167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595707169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-07T18:20:47,363 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=162, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/3f58737546f54e10b7cdd9a6f9c827f4 2024-12-07T18:20:47,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/eed0e0bb593d4223aa381880caaa0ea3 is 50, key is test_row_0/B:col10/1733595644886/Put/seqid=0 2024-12-07T18:20:47,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595707370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595707371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595707373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595707374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595707375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742252_1428 (size=7365) 2024-12-07T18:20:47,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595707676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595707676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595707680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595707682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:47,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595707682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:47,778 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/eed0e0bb593d4223aa381880caaa0ea3 2024-12-07T18:20:47,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/ac8307b227914e208a7bc8074017c92e is 50, key is test_row_0/C:col10/1733595644886/Put/seqid=0 2024-12-07T18:20:47,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742253_1429 (size=7365) 2024-12-07T18:20:47,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-07T18:20:48,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:48,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595708181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:48,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:48,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595708182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:48,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:48,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595708184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:48,190 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/ac8307b227914e208a7bc8074017c92e 2024-12-07T18:20:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/3f58737546f54e10b7cdd9a6f9c827f4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f58737546f54e10b7cdd9a6f9c827f4 2024-12-07T18:20:48,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:48,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595708188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:48,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:48,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595708189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:48,198 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f58737546f54e10b7cdd9a6f9c827f4, entries=50, sequenceid=162, filesize=13.5 K 2024-12-07T18:20:48,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/eed0e0bb593d4223aa381880caaa0ea3 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/eed0e0bb593d4223aa381880caaa0ea3 2024-12-07T18:20:48,202 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/eed0e0bb593d4223aa381880caaa0ea3, entries=50, sequenceid=162, filesize=7.2 K 2024-12-07T18:20:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/ac8307b227914e208a7bc8074017c92e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/ac8307b227914e208a7bc8074017c92e 2024-12-07T18:20:48,207 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/ac8307b227914e208a7bc8074017c92e, entries=50, sequenceid=162, filesize=7.2 K 2024-12-07T18:20:48,208 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 81dd0984f21c5170abf1b07080819b3d in 1307ms, sequenceid=162, 
compaction requested=true 2024-12-07T18:20:48,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:48,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:48,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-07T18:20:48,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-07T18:20:48,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-07T18:20:48,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4610 sec 2024-12-07T18:20:48,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 1.4680 sec 2024-12-07T18:20:48,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-07T18:20:48,852 INFO [Thread-1728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-07T18:20:48,853 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:48,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-07T18:20:48,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-07T18:20:48,855 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:48,858 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:48,859 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:48,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-07T18:20:49,010 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): 
Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-07T18:20:49,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:49,011 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-07T18:20:49,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:49,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:49,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:49,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:49,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:49,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:49,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207f66ff8e6654b41ec989da72a5f076b61_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595647055/Put/seqid=0 2024-12-07T18:20:49,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742254_1430 (size=12304) 2024-12-07T18:20:49,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-07T18:20:49,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:49,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:49,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:49,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595709196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:49,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595709200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:49,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595709200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:49,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595709201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:49,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595709203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:49,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595709307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:49,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595709308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:49,431 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207f66ff8e6654b41ec989da72a5f076b61_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207f66ff8e6654b41ec989da72a5f076b61_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:49,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/dfff903b378141dbbdb5c50181f82693, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:49,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/dfff903b378141dbbdb5c50181f82693 is 175, key is test_row_0/A:col10/1733595647055/Put/seqid=0 2024-12-07T18:20:49,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742255_1431 (size=31105) 2024-12-07T18:20:49,438 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=192, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/dfff903b378141dbbdb5c50181f82693 2024-12-07T18:20:49,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/57d931af010a451bafbbd4c5f0296bf5 is 50, key is test_row_0/B:col10/1733595647055/Put/seqid=0 2024-12-07T18:20:49,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742256_1432 (size=12151) 2024-12-07T18:20:49,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-07T18:20:49,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:49,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595709513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:49,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595709514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:49,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595709819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:49,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595709820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:49,850 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/57d931af010a451bafbbd4c5f0296bf5 2024-12-07T18:20:49,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/8a6d8856764b462abbc2bd39145989be is 50, key is test_row_0/C:col10/1733595647055/Put/seqid=0 2024-12-07T18:20:49,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742257_1433 (size=12151) 2024-12-07T18:20:49,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-07T18:20:50,261 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/8a6d8856764b462abbc2bd39145989be 2024-12-07T18:20:50,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/dfff903b378141dbbdb5c50181f82693 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/dfff903b378141dbbdb5c50181f82693 2024-12-07T18:20:50,270 INFO 
[RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/dfff903b378141dbbdb5c50181f82693, entries=150, sequenceid=192, filesize=30.4 K 2024-12-07T18:20:50,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/57d931af010a451bafbbd4c5f0296bf5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/57d931af010a451bafbbd4c5f0296bf5 2024-12-07T18:20:50,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,275 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/57d931af010a451bafbbd4c5f0296bf5, entries=150, sequenceid=192, filesize=11.9 K 2024-12-07T18:20:50,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/8a6d8856764b462abbc2bd39145989be as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/8a6d8856764b462abbc2bd39145989be 2024-12-07T18:20:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,281 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/8a6d8856764b462abbc2bd39145989be, entries=150, sequenceid=192, filesize=11.9 K 2024-12-07T18:20:50,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,282 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 81dd0984f21c5170abf1b07080819b3d in 1271ms, sequenceid=192, compaction requested=true 2024-12-07T18:20:50,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:50,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:50,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-07T18:20:50,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-07T18:20:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,284 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,284 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-07T18:20:50,284 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4240 sec 2024-12-07T18:20:50,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.4320 sec 2024-12-07T18:20:50,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical DEBUG entries condensed: storefiletracker.StoreFileTrackerFactory(122) logged "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeatedly from RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 45237, timestamps 2024-12-07T18:20:50,287 through 2024-12-07T18:20:50,350]
2024-12-07T18:20:50,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:50,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:20:50,379 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:50,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:50,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:50,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:50,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:50,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:50,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412072dd417852fbd434587c6db58d3ed7588_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595650369/Put/seqid=0 2024-12-07T18:20:50,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742258_1434 (size=17284) 2024-12-07T18:20:50,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:50,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595710457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:50,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595710466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:50,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:50,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595710569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:50,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:50,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595710574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:50,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:50,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595710775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:50,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:50,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595710784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:50,796 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:50,799 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412072dd417852fbd434587c6db58d3ed7588_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412072dd417852fbd434587c6db58d3ed7588_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:50,800 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/89f0d080973b43ebb128f48043f101dc, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:50,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/89f0d080973b43ebb128f48043f101dc is 175, key is test_row_0/A:col10/1733595650369/Put/seqid=0 2024-12-07T18:20:50,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742259_1435 (size=48389) 2024-12-07T18:20:50,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-07T18:20:50,960 INFO [Thread-1728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-07T18:20:50,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:20:50,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-07T18:20:50,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T18:20:50,963 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:20:50,963 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:20:50,964 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:20:51,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T18:20:51,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:51,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595711083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:51,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595711088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,115 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-07T18:20:51,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:51,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,205 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=204, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/89f0d080973b43ebb128f48043f101dc 2024-12-07T18:20:51,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/f9e8fe3d100740fa9e3ad1acfce3a4c9 is 50, key is test_row_0/B:col10/1733595650369/Put/seqid=0 2024-12-07T18:20:51,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742260_1436 (size=12151) 2024-12-07T18:20:51,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595711211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595711213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,219 DEBUG [Thread-1724 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4169 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:20:51,219 DEBUG [Thread-1720 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:20:51,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:51,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595711215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,222 DEBUG [Thread-1718 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:20:51,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T18:20:51,268 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-07T18:20:51,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 
{event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:51,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:51,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,420 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-07T18:20:51,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
as already flushing 2024-12-07T18:20:51,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,421 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T18:20:51,573 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-07T18:20:51,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:51,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,574 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:51,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595711592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:51,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595711597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,617 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/f9e8fe3d100740fa9e3ad1acfce3a4c9 2024-12-07T18:20:51,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/7c3c336311d2417fa5e640edc8b484ed is 50, key is test_row_0/C:col10/1733595650369/Put/seqid=0 2024-12-07T18:20:51,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742261_1437 (size=12151) 2024-12-07T18:20:51,726 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-07T18:20:51,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:51,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,727 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,879 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:51,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-07T18:20:51,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:51,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:51,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:51,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:52,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/7c3c336311d2417fa5e640edc8b484ed 2024-12-07T18:20:52,031 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:52,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-07T18:20:52,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:52,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. as already flushing 2024-12-07T18:20:52,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
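Note: the repetition above is the remote flush procedure (pid=125) being re-dispatched. Each attempt fails with "Unable to complete flush" because HRegion reports "NOT flushing ... as already flushing" — a flush started by MemStoreFlusher is still in progress — so the region server reports the IOException back and the master retries the callable until the in-flight flush finishes. A table flush like this is normally requested through the Admin API; the following is a minimal sketch (not taken from the test itself; table name assumed from the log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Requests a flush of all regions of the table. If a region is already
          // flushing, the remote flush callable fails and is retried by the master,
          // which is exactly the pattern visible in the log records above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }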
2024-12-07T18:20:52,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:52,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:20:52,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/89f0d080973b43ebb128f48043f101dc as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/89f0d080973b43ebb128f48043f101dc 2024-12-07T18:20:52,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:20:52,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/89f0d080973b43ebb128f48043f101dc, entries=250, sequenceid=204, filesize=47.3 K 2024-12-07T18:20:52,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/f9e8fe3d100740fa9e3ad1acfce3a4c9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/f9e8fe3d100740fa9e3ad1acfce3a4c9 2024-12-07T18:20:52,040 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/f9e8fe3d100740fa9e3ad1acfce3a4c9, entries=150, sequenceid=204, filesize=11.9 K 2024-12-07T18:20:52,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/7c3c336311d2417fa5e640edc8b484ed as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7c3c336311d2417fa5e640edc8b484ed 2024-12-07T18:20:52,045 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7c3c336311d2417fa5e640edc8b484ed, entries=150, sequenceid=204, filesize=11.9 K 2024-12-07T18:20:52,045 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 81dd0984f21c5170abf1b07080819b3d in 1668ms, sequenceid=204, compaction requested=true 2024-12-07T18:20:52,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:52,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:52,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:52,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:52,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:52,045 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-07T18:20:52,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
81dd0984f21c5170abf1b07080819b3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:52,045 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-07T18:20:52,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:52,048 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 217481 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-07T18:20:52,048 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 77683 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-07T18:20:52,048 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/B is initiating minor compaction (all files) 2024-12-07T18:20:52,048 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/A is initiating minor compaction (all files) 2024-12-07T18:20:52,048 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/A in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:52,048 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/B in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
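Note: the two compaction threads above each selected all 7 eligible store files of families A and B for a minor (all-files) compaction; selection is driven by the per-store file count and the exploring compaction policy, and the "16 blocking" figure is the blocking-store-files limit. A hedged sketch of the configuration knobs involved follows — the keys are the standard HBase ones, and the values shown are the usual defaults rather than values read from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of store files before a minor compaction is considered (default 3).
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Maximum number of files rewritten in one minor compaction (default 10).
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Updates to a store are blocked once it accumulates this many files (default 16,
        // matching the "16 blocking" reported by SortedCompactionPolicy above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
      }
    }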
2024-12-07T18:20:52,048 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f2c079484f34d918c6bf260076eada2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/1e44fef0da8440c297960450cdec7d4e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3606165ddcdf4dbeafed2c0d2c644426, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/580a22dca33640f19186560d730d2512, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f58737546f54e10b7cdd9a6f9c827f4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/dfff903b378141dbbdb5c50181f82693, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/89f0d080973b43ebb128f48043f101dc] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=212.4 K 2024-12-07T18:20:52,048 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/12e735bff11349cd8035a7a19cdecb30, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/8d5078742f94458394e5be86a761a603, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/2e5ba0836f694bea8766c25ee550b6fc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/109c528ed9cf4136b9407fee2803d70d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/eed0e0bb593d4223aa381880caaa0ea3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/57d931af010a451bafbbd4c5f0296bf5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/f9e8fe3d100740fa9e3ad1acfce3a4c9] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=75.9 K 2024-12-07T18:20:52,049 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=9 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A 
region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:52,049 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f2c079484f34d918c6bf260076eada2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/1e44fef0da8440c297960450cdec7d4e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3606165ddcdf4dbeafed2c0d2c644426, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/580a22dca33640f19186560d730d2512, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f58737546f54e10b7cdd9a6f9c827f4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/dfff903b378141dbbdb5c50181f82693, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/89f0d080973b43ebb128f48043f101dc] 2024-12-07T18:20:52,049 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 12e735bff11349cd8035a7a19cdecb30, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733595640415 2024-12-07T18:20:52,049 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f2c079484f34d918c6bf260076eada2, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733595640415 2024-12-07T18:20:52,049 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d5078742f94458394e5be86a761a603, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733595640546 2024-12-07T18:20:52,049 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e44fef0da8440c297960450cdec7d4e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733595640546 2024-12-07T18:20:52,050 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e5ba0836f694bea8766c25ee550b6fc, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733595642571 2024-12-07T18:20:52,050 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3606165ddcdf4dbeafed2c0d2c644426, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733595642571 2024-12-07T18:20:52,050 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 109c528ed9cf4136b9407fee2803d70d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733595642744 2024-12-07T18:20:52,050 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 580a22dca33640f19186560d730d2512, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733595642744 2024-12-07T18:20:52,051 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting eed0e0bb593d4223aa381880caaa0ea3, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1733595644886 2024-12-07T18:20:52,051 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f58737546f54e10b7cdd9a6f9c827f4, keycount=50, bloomtype=ROW, size=13.5 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1733595644886 2024-12-07T18:20:52,051 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 57d931af010a451bafbbd4c5f0296bf5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1733595647053 2024-12-07T18:20:52,051 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfff903b378141dbbdb5c50181f82693, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1733595647053 2024-12-07T18:20:52,051 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting f9e8fe3d100740fa9e3ad1acfce3a4c9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733595650369 2024-12-07T18:20:52,051 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89f0d080973b43ebb128f48043f101dc, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733595649199 2024-12-07T18:20:52,062 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:52,064 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#B#compaction#370 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:52,065 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/a6887d2f87de4c96a3fe2814644dd2e9 is 50, key is test_row_0/B:col10/1733595650369/Put/seqid=0 2024-12-07T18:20:52,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T18:20:52,072 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120757a0c6b5b3e14928a6b3871ff785a884_81dd0984f21c5170abf1b07080819b3d store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:52,075 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120757a0c6b5b3e14928a6b3871ff785a884_81dd0984f21c5170abf1b07080819b3d, store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:52,075 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120757a0c6b5b3e14928a6b3871ff785a884_81dd0984f21c5170abf1b07080819b3d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:52,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742262_1438 (size=12595) 2024-12-07T18:20:52,083 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/a6887d2f87de4c96a3fe2814644dd2e9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/a6887d2f87de4c96a3fe2814644dd2e9 2024-12-07T18:20:52,087 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/B of 81dd0984f21c5170abf1b07080819b3d into a6887d2f87de4c96a3fe2814644dd2e9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
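Note: the shortCompactions thread above runs the MOB-aware compactor for family A (a writer is created under mobdir/.tmp and then aborted because the selected files contain no MOB cells), while family B has already been rewritten into a single 12.3 K file. Family A therefore appears to be MOB-enabled in this test. For reference, a MOB column family is typically declared as in the sketch below; the threshold value is illustrative, not read from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
      public static void main(String[] args) {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)        // cells larger than the threshold are stored as MOB files
                .setMobThreshold(102400L)   // illustrative 100 KB threshold
                .build());
        // table.build() would then be passed to Admin.createTable(...)
      }
    }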
2024-12-07T18:20:52,087 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:52,087 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/B, priority=9, startTime=1733595652045; duration=0sec 2024-12-07T18:20:52,087 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:52,087 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:B 2024-12-07T18:20:52,087 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-07T18:20:52,089 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 77683 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-07T18:20:52,089 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/C is initiating minor compaction (all files) 2024-12-07T18:20:52,089 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/C in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:52,089 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/fee36b812b6c48b484f0e9cb1ccad99d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/4cc6caa674a843b2936b50b1a973eee5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7e704e7bb1af433199382905297a87fe, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/9e8ec299c4b4476f90274858e8dda150, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/ac8307b227914e208a7bc8074017c92e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/8a6d8856764b462abbc2bd39145989be, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7c3c336311d2417fa5e640edc8b484ed] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=75.9 K 2024-12-07T18:20:52,090 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting fee36b812b6c48b484f0e9cb1ccad99d, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733595640415 2024-12-07T18:20:52,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742263_1439 (size=4469) 2024-12-07T18:20:52,091 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 4cc6caa674a843b2936b50b1a973eee5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733595640546 2024-12-07T18:20:52,091 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e704e7bb1af433199382905297a87fe, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733595642571 2024-12-07T18:20:52,091 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e8ec299c4b4476f90274858e8dda150, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733595642744 2024-12-07T18:20:52,092 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ac8307b227914e208a7bc8074017c92e, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1733595644886 2024-12-07T18:20:52,092 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a6d8856764b462abbc2bd39145989be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1733595647053 2024-12-07T18:20:52,093 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c3c336311d2417fa5e640edc8b484ed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733595650369 2024-12-07T18:20:52,107 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#C#compaction#371 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:52,107 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/6eff63bdb3884236b5a40d4438be41fa is 50, key is test_row_0/C:col10/1733595650369/Put/seqid=0 2024-12-07T18:20:52,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742264_1440 (size=12595) 2024-12-07T18:20:52,122 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/6eff63bdb3884236b5a40d4438be41fa as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/6eff63bdb3884236b5a40d4438be41fa 2024-12-07T18:20:52,127 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/C of 81dd0984f21c5170abf1b07080819b3d into 6eff63bdb3884236b5a40d4438be41fa(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:52,127 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:52,127 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/C, priority=9, startTime=1733595652045; duration=0sec 2024-12-07T18:20:52,127 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:52,127 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:C 2024-12-07T18:20:52,184 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:20:52,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-07T18:20:52,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:52,185 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-07T18:20:52,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:52,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:52,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:52,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:52,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:52,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:52,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207406a4fc2581b4310a75b3637610aabdb_81dd0984f21c5170abf1b07080819b3d is 50, key is 
test_row_0/A:col10/1733595650454/Put/seqid=0 2024-12-07T18:20:52,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742265_1441 (size=12304) 2024-12-07T18:20:52,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:52,202 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207406a4fc2581b4310a75b3637610aabdb_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207406a4fc2581b4310a75b3637610aabdb_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:52,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/4eb6849b9a3f4ac593d5ad74e7bb7ba5, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:52,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/4eb6849b9a3f4ac593d5ad74e7bb7ba5 is 175, key is test_row_0/A:col10/1733595650454/Put/seqid=0 2024-12-07T18:20:52,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742266_1442 (size=31105) 2024-12-07T18:20:52,492 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#A#compaction#369 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:52,492 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/10a6f873e375422cbebe661e2eae97dd is 175, key is test_row_0/A:col10/1733595650369/Put/seqid=0 2024-12-07T18:20:52,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742267_1443 (size=31549) 2024-12-07T18:20:52,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:52,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
as already flushing 2024-12-07T18:20:52,616 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=230, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/4eb6849b9a3f4ac593d5ad74e7bb7ba5 2024-12-07T18:20:52,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/1b78d15f65794d9b8768abdba7e5af80 is 50, key is test_row_0/B:col10/1733595650454/Put/seqid=0 2024-12-07T18:20:52,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742268_1444 (size=12151) 2024-12-07T18:20:52,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:52,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595712659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:52,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:52,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595712659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:52,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:52,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595712765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:52,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:52,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595712765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:52,901 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/10a6f873e375422cbebe661e2eae97dd as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/10a6f873e375422cbebe661e2eae97dd 2024-12-07T18:20:52,905 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/A of 81dd0984f21c5170abf1b07080819b3d into 10a6f873e375422cbebe661e2eae97dd(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
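Note: the RegionTooBusyException warnings above are memstore back-pressure. While the three column families are still flushing, the region's pending memstore data exceeds the blocking limit (512 K here — typically the configured flush size multiplied by hbase.hregion.memstore.block.multiplier; the small value suggests the test lowers the flush size), so incoming mutations are rejected until the flush completes. The standard HBase client retries such calls internally after a backoff; the sketch below is only a simplified illustration of that back-off-and-retry pattern, with a hypothetical helper name, not how the real client surfaces the error:

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class PutRetryExample {
      // Hypothetical helper: retry a put while the region reports memstore back-pressure.
      static void putWithRetry(Table table, Put put, int maxAttempts)
          throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            if (attempt >= maxAttempts) {
              throw e; // give up after maxAttempts
            }
            Thread.sleep(100L * attempt); // simple linear backoff before retrying
          }
        }
      }
    }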
2024-12-07T18:20:52,905 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:52,905 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/A, priority=9, startTime=1733595652045; duration=0sec 2024-12-07T18:20:52,905 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:52,905 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:A 2024-12-07T18:20:52,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:52,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595712970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:52,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:52,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595712970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:53,028 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/1b78d15f65794d9b8768abdba7e5af80 2024-12-07T18:20:53,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/6c5d5f8dd07d493b8f0a9390a4c22f1e is 50, key is test_row_0/C:col10/1733595650454/Put/seqid=0 2024-12-07T18:20:53,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742269_1445 (size=12151) 2024-12-07T18:20:53,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T18:20:53,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:53,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595713276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:53,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595713278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:53,443 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/6c5d5f8dd07d493b8f0a9390a4c22f1e 2024-12-07T18:20:53,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/4eb6849b9a3f4ac593d5ad74e7bb7ba5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4eb6849b9a3f4ac593d5ad74e7bb7ba5 2024-12-07T18:20:53,451 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4eb6849b9a3f4ac593d5ad74e7bb7ba5, entries=150, sequenceid=230, filesize=30.4 K 2024-12-07T18:20:53,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/1b78d15f65794d9b8768abdba7e5af80 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/1b78d15f65794d9b8768abdba7e5af80 2024-12-07T18:20:53,455 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/1b78d15f65794d9b8768abdba7e5af80, entries=150, sequenceid=230, filesize=11.9 K 2024-12-07T18:20:53,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/6c5d5f8dd07d493b8f0a9390a4c22f1e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/6c5d5f8dd07d493b8f0a9390a4c22f1e 2024-12-07T18:20:53,459 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/6c5d5f8dd07d493b8f0a9390a4c22f1e, entries=150, sequenceid=230, filesize=11.9 K 2024-12-07T18:20:53,459 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 81dd0984f21c5170abf1b07080819b3d in 1274ms, sequenceid=230, compaction requested=false 2024-12-07T18:20:53,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:53,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:53,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-07T18:20:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-07T18:20:53,462 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-07T18:20:53,462 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4980 sec 2024-12-07T18:20:53,463 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 2.5010 sec 2024-12-07T18:20:53,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:53,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-07T18:20:53,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:53,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:53,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:53,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:53,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:53,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:53,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207c4b6472e0179448c90f49455b009e6ff_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595653781/Put/seqid=0 2024-12-07T18:20:53,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742270_1446 (size=14794) 2024-12-07T18:20:53,797 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:53,802 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207c4b6472e0179448c90f49455b009e6ff_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207c4b6472e0179448c90f49455b009e6ff_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:53,803 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/d1ac8d15f6f7438d927b3ef906725ab7, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:53,804 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/d1ac8d15f6f7438d927b3ef906725ab7 is 175, key is test_row_0/A:col10/1733595653781/Put/seqid=0 2024-12-07T18:20:53,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742271_1447 (size=39749) 2024-12-07T18:20:53,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:53,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595713840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:53,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:53,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595713841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:53,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:53,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595713943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:53,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:53,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595713946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:54,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:54,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595714149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:54,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:54,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595714152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:54,208 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=244, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/d1ac8d15f6f7438d927b3ef906725ab7 2024-12-07T18:20:54,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/6afd25630d71477e827b1cef99577f32 is 50, key is test_row_0/B:col10/1733595653781/Put/seqid=0 2024-12-07T18:20:54,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742272_1448 (size=12151) 2024-12-07T18:20:54,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/6afd25630d71477e827b1cef99577f32 2024-12-07T18:20:54,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/d7bee2f63fd34d2f8ffdeb263570b4d1 is 50, key is test_row_0/C:col10/1733595653781/Put/seqid=0 2024-12-07T18:20:54,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742273_1449 (size=12151) 2024-12-07T18:20:54,249 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/d7bee2f63fd34d2f8ffdeb263570b4d1 2024-12-07T18:20:54,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/d1ac8d15f6f7438d927b3ef906725ab7 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/d1ac8d15f6f7438d927b3ef906725ab7 2024-12-07T18:20:54,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/d1ac8d15f6f7438d927b3ef906725ab7, entries=200, sequenceid=244, filesize=38.8 K 2024-12-07T18:20:54,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/6afd25630d71477e827b1cef99577f32 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/6afd25630d71477e827b1cef99577f32 2024-12-07T18:20:54,260 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/6afd25630d71477e827b1cef99577f32, entries=150, sequenceid=244, filesize=11.9 K 2024-12-07T18:20:54,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/d7bee2f63fd34d2f8ffdeb263570b4d1 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d7bee2f63fd34d2f8ffdeb263570b4d1 2024-12-07T18:20:54,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d7bee2f63fd34d2f8ffdeb263570b4d1, entries=150, sequenceid=244, filesize=11.9 K 2024-12-07T18:20:54,267 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 81dd0984f21c5170abf1b07080819b3d in 485ms, sequenceid=244, compaction requested=true 2024-12-07T18:20:54,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:54,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:54,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:54,267 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:54,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:54,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:54,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:54,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:54,267 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:54,268 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:54,268 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/A is initiating minor compaction (all files) 2024-12-07T18:20:54,268 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:54,268 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/A in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:54,268 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/B is initiating minor compaction (all files) 2024-12-07T18:20:54,268 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/B in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
2024-12-07T18:20:54,268 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/10a6f873e375422cbebe661e2eae97dd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4eb6849b9a3f4ac593d5ad74e7bb7ba5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/d1ac8d15f6f7438d927b3ef906725ab7] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=100.0 K 2024-12-07T18:20:54,268 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/a6887d2f87de4c96a3fe2814644dd2e9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/1b78d15f65794d9b8768abdba7e5af80, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/6afd25630d71477e827b1cef99577f32] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=36.0 K 2024-12-07T18:20:54,268 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:54,268 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/10a6f873e375422cbebe661e2eae97dd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4eb6849b9a3f4ac593d5ad74e7bb7ba5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/d1ac8d15f6f7438d927b3ef906725ab7] 2024-12-07T18:20:54,269 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a6887d2f87de4c96a3fe2814644dd2e9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733595650369 2024-12-07T18:20:54,269 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10a6f873e375422cbebe661e2eae97dd, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733595650369 2024-12-07T18:20:54,269 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4eb6849b9a3f4ac593d5ad74e7bb7ba5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733595650454 2024-12-07T18:20:54,269 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b78d15f65794d9b8768abdba7e5af80, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733595650454 2024-12-07T18:20:54,269 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1ac8d15f6f7438d927b3ef906725ab7, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733595652613 2024-12-07T18:20:54,269 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 6afd25630d71477e827b1cef99577f32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733595652628 2024-12-07T18:20:54,276 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:54,279 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#B#compaction#379 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:54,279 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/d220d1cede9f4260a824c27c17cbec64 is 50, key is test_row_0/B:col10/1733595653781/Put/seqid=0 2024-12-07T18:20:54,281 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241207e6f9e68eeece40dcb8137c6d20037c51_81dd0984f21c5170abf1b07080819b3d store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:54,283 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241207e6f9e68eeece40dcb8137c6d20037c51_81dd0984f21c5170abf1b07080819b3d, store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:54,283 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207e6f9e68eeece40dcb8137c6d20037c51_81dd0984f21c5170abf1b07080819b3d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:54,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742274_1450 (size=12697) 2024-12-07T18:20:54,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742275_1451 (size=4469) 2024-12-07T18:20:54,305 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#A#compaction#378 average throughput is 0.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:54,306 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/c4c1e87de63b4bf1b2f0578730b74997 is 175, key is test_row_0/A:col10/1733595653781/Put/seqid=0 2024-12-07T18:20:54,310 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/d220d1cede9f4260a824c27c17cbec64 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d220d1cede9f4260a824c27c17cbec64 2024-12-07T18:20:54,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742276_1452 (size=31651) 2024-12-07T18:20:54,315 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/B of 81dd0984f21c5170abf1b07080819b3d into d220d1cede9f4260a824c27c17cbec64(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:54,315 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:54,316 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/B, priority=13, startTime=1733595654267; duration=0sec 2024-12-07T18:20:54,316 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:54,316 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:B 2024-12-07T18:20:54,316 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:54,316 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/c4c1e87de63b4bf1b2f0578730b74997 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/c4c1e87de63b4bf1b2f0578730b74997 2024-12-07T18:20:54,317 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:54,317 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/C is initiating minor compaction (all files) 2024-12-07T18:20:54,317 INFO 
[RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/C in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:54,317 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/6eff63bdb3884236b5a40d4438be41fa, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/6c5d5f8dd07d493b8f0a9390a4c22f1e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d7bee2f63fd34d2f8ffdeb263570b4d1] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=36.0 K 2024-12-07T18:20:54,317 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 6eff63bdb3884236b5a40d4438be41fa, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733595650369 2024-12-07T18:20:54,318 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c5d5f8dd07d493b8f0a9390a4c22f1e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733595650454 2024-12-07T18:20:54,318 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d7bee2f63fd34d2f8ffdeb263570b4d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733595652628 2024-12-07T18:20:54,322 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/A of 81dd0984f21c5170abf1b07080819b3d into c4c1e87de63b4bf1b2f0578730b74997(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:54,322 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:54,322 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/A, priority=13, startTime=1733595654267; duration=0sec 2024-12-07T18:20:54,322 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:54,322 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:A 2024-12-07T18:20:54,327 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#C#compaction#380 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:54,327 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/4a629f089dcd49e1a67f5a1d3c8f8002 is 50, key is test_row_0/C:col10/1733595653781/Put/seqid=0 2024-12-07T18:20:54,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742277_1453 (size=12697) 2024-12-07T18:20:54,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:54,460 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-07T18:20:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:54,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120790464ee84984409fb9c832018015392c_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595653829/Put/seqid=0 2024-12-07T18:20:54,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742278_1454 (size=14994) 2024-12-07T18:20:54,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:54,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595714480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:54,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:54,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595714482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:54,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:54,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595714585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:54,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:54,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595714587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:54,735 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/4a629f089dcd49e1a67f5a1d3c8f8002 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/4a629f089dcd49e1a67f5a1d3c8f8002 2024-12-07T18:20:54,739 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/C of 81dd0984f21c5170abf1b07080819b3d into 4a629f089dcd49e1a67f5a1d3c8f8002(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:54,739 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:54,739 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/C, priority=13, startTime=1733595654267; duration=0sec 2024-12-07T18:20:54,739 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:54,739 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:C 2024-12-07T18:20:54,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:54,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595714788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:54,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:54,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595714790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:54,874 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:54,877 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120790464ee84984409fb9c832018015392c_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120790464ee84984409fb9c832018015392c_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:54,878 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/7a04830a95b34f088be3b80d8a4172bd, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:54,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/7a04830a95b34f088be3b80d8a4172bd is 175, key is test_row_0/A:col10/1733595653829/Put/seqid=0 2024-12-07T18:20:54,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742279_1455 (size=39949) 2024-12-07T18:20:54,882 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=270, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/7a04830a95b34f088be3b80d8a4172bd 2024-12-07T18:20:54,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/42ca1984a71b4708aaccb3e24aca6007 is 50, key is test_row_0/B:col10/1733595653829/Put/seqid=0 2024-12-07T18:20:54,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742280_1456 
(size=12301) 2024-12-07T18:20:54,902 DEBUG [Thread-1737 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x133cc1f0 to 127.0.0.1:56016 2024-12-07T18:20:54,902 DEBUG [Thread-1737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:54,905 DEBUG [Thread-1735 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x191ae36a to 127.0.0.1:56016 2024-12-07T18:20:54,905 DEBUG [Thread-1735 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:54,905 DEBUG [Thread-1731 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d930fb1 to 127.0.0.1:56016 2024-12-07T18:20:54,905 DEBUG [Thread-1731 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:54,905 DEBUG [Thread-1729 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b7324d5 to 127.0.0.1:56016 2024-12-07T18:20:54,905 DEBUG [Thread-1729 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:54,909 DEBUG [Thread-1733 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x114e6211 to 127.0.0.1:56016 2024-12-07T18:20:54,909 DEBUG [Thread-1733 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:55,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T18:20:55,067 INFO [Thread-1728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-07T18:20:55,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:55,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595715093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:55,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:55,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595715094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:55,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:55,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40294 deadline: 1733595715226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:55,227 DEBUG [Thread-1718 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:20:55,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:55,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:55,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40310 deadline: 1733595715231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:55,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40258 deadline: 1733595715231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:55,231 DEBUG [Thread-1724 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8182 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:20:55,231 DEBUG [Thread-1720 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:20:55,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/42ca1984a71b4708aaccb3e24aca6007 2024-12-07T18:20:55,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/5236a2dc612a47d2895a10a00ec81206 is 50, key is test_row_0/C:col10/1733595653829/Put/seqid=0 2024-12-07T18:20:55,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742281_1457 (size=12301) 2024-12-07T18:20:55,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:55,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40272 deadline: 1733595715596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:55,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:20:55,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1733595715598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:20:55,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/5236a2dc612a47d2895a10a00ec81206 2024-12-07T18:20:55,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/7a04830a95b34f088be3b80d8a4172bd as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/7a04830a95b34f088be3b80d8a4172bd 2024-12-07T18:20:55,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/7a04830a95b34f088be3b80d8a4172bd, entries=200, sequenceid=270, filesize=39.0 K 2024-12-07T18:20:55,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/42ca1984a71b4708aaccb3e24aca6007 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/42ca1984a71b4708aaccb3e24aca6007 2024-12-07T18:20:55,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/42ca1984a71b4708aaccb3e24aca6007, entries=150, sequenceid=270, filesize=12.0 K 2024-12-07T18:20:55,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/5236a2dc612a47d2895a10a00ec81206 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5236a2dc612a47d2895a10a00ec81206 2024-12-07T18:20:55,714 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5236a2dc612a47d2895a10a00ec81206, entries=150, sequenceid=270, filesize=12.0 K 2024-12-07T18:20:55,715 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 81dd0984f21c5170abf1b07080819b3d in 1254ms, sequenceid=270, compaction requested=false 2024-12-07T18:20:55,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:56,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:56,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-07T18:20:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:20:56,604 DEBUG [Thread-1722 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64a04d7a to 127.0.0.1:56016 2024-12-07T18:20:56,604 DEBUG [Thread-1722 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:20:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:20:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:20:56,607 DEBUG [Thread-1726 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d7912a0 to 127.0.0.1:56016 2024-12-07T18:20:56,607 DEBUG [Thread-1726 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:20:56,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207cc6108dc707e421fb741a783bb3a5096_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595656603/Put/seqid=0 2024-12-07T18:20:56,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742282_1458 (size=12454) 2024-12-07T18:20:57,014 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:20:57,017 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207cc6108dc707e421fb741a783bb3a5096_81dd0984f21c5170abf1b07080819b3d to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207cc6108dc707e421fb741a783bb3a5096_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:20:57,017 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/22996c67ec7f4835aee4caf51ffdc3bc, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:57,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/22996c67ec7f4835aee4caf51ffdc3bc is 175, key is test_row_0/A:col10/1733595656603/Put/seqid=0 2024-12-07T18:20:57,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742283_1459 (size=31255) 2024-12-07T18:20:57,422 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=284, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/22996c67ec7f4835aee4caf51ffdc3bc 2024-12-07T18:20:57,427 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/9e889bba08324bab92095928fe81130c is 50, key is test_row_0/B:col10/1733595656603/Put/seqid=0 2024-12-07T18:20:57,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742284_1460 (size=12301) 2024-12-07T18:20:57,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/9e889bba08324bab92095928fe81130c 2024-12-07T18:20:57,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/23732e3102984258a6c01b96f91a928b is 50, key is test_row_0/C:col10/1733595656603/Put/seqid=0 2024-12-07T18:20:57,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742285_1461 (size=12301) 2024-12-07T18:20:58,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/23732e3102984258a6c01b96f91a928b 2024-12-07T18:20:58,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/22996c67ec7f4835aee4caf51ffdc3bc as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/22996c67ec7f4835aee4caf51ffdc3bc 2024-12-07T18:20:58,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/22996c67ec7f4835aee4caf51ffdc3bc, entries=150, sequenceid=284, filesize=30.5 K 2024-12-07T18:20:58,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/9e889bba08324bab92095928fe81130c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/9e889bba08324bab92095928fe81130c 2024-12-07T18:20:58,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/9e889bba08324bab92095928fe81130c, entries=150, sequenceid=284, filesize=12.0 K 2024-12-07T18:20:58,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/23732e3102984258a6c01b96f91a928b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/23732e3102984258a6c01b96f91a928b 2024-12-07T18:20:58,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/23732e3102984258a6c01b96f91a928b, entries=150, sequenceid=284, filesize=12.0 K 2024-12-07T18:20:58,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=6.71 KB/6870 for 81dd0984f21c5170abf1b07080819b3d in 1652ms, sequenceid=284, compaction requested=true 2024-12-07T18:20:58,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:58,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:20:58,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:58,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:20:58,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:58,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 81dd0984f21c5170abf1b07080819b3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:20:58,256 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:58,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:58,256 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:58,257 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102855 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:58,257 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:58,257 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/A is initiating minor compaction (all files) 2024-12-07T18:20:58,257 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/B is initiating minor compaction (all files) 2024-12-07T18:20:58,257 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/A in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:58,257 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/B in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
2024-12-07T18:20:58,257 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d220d1cede9f4260a824c27c17cbec64, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/42ca1984a71b4708aaccb3e24aca6007, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/9e889bba08324bab92095928fe81130c] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=36.4 K 2024-12-07T18:20:58,257 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/c4c1e87de63b4bf1b2f0578730b74997, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/7a04830a95b34f088be3b80d8a4172bd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/22996c67ec7f4835aee4caf51ffdc3bc] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=100.4 K 2024-12-07T18:20:58,257 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:58,257 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/c4c1e87de63b4bf1b2f0578730b74997, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/7a04830a95b34f088be3b80d8a4172bd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/22996c67ec7f4835aee4caf51ffdc3bc] 2024-12-07T18:20:58,257 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d220d1cede9f4260a824c27c17cbec64, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733595652628 2024-12-07T18:20:58,257 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4c1e87de63b4bf1b2f0578730b74997, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733595652628 2024-12-07T18:20:58,258 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 42ca1984a71b4708aaccb3e24aca6007, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733595653829 2024-12-07T18:20:58,258 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a04830a95b34f088be3b80d8a4172bd, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733595653829 2024-12-07T18:20:58,258 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e889bba08324bab92095928fe81130c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733595654474 2024-12-07T18:20:58,258 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22996c67ec7f4835aee4caf51ffdc3bc, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733595654474 2024-12-07T18:20:58,264 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:58,265 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412071b22806181874264a00ffdf29ad15c59_81dd0984f21c5170abf1b07080819b3d store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:58,266 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#B#compaction#387 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:58,267 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/baceb9ed2449477c8f0e2cf3a563215d is 50, key is test_row_0/B:col10/1733595656603/Put/seqid=0 2024-12-07T18:20:58,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742286_1462 (size=12949) 2024-12-07T18:20:58,290 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412071b22806181874264a00ffdf29ad15c59_81dd0984f21c5170abf1b07080819b3d, store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:58,290 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412071b22806181874264a00ffdf29ad15c59_81dd0984f21c5170abf1b07080819b3d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:20:58,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742287_1463 (size=4469) 2024-12-07T18:20:58,304 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#A#compaction#388 average throughput is 0.61 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:58,305 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/64de5159849846b9ae428f761b61ced0 is 175, key is test_row_0/A:col10/1733595656603/Put/seqid=0 2024-12-07T18:20:58,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742288_1464 (size=31903) 2024-12-07T18:20:58,319 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/64de5159849846b9ae428f761b61ced0 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/64de5159849846b9ae428f761b61ced0 2024-12-07T18:20:58,324 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/A of 81dd0984f21c5170abf1b07080819b3d into 64de5159849846b9ae428f761b61ced0(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:58,324 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:58,324 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/A, priority=13, startTime=1733595658256; duration=0sec 2024-12-07T18:20:58,325 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:20:58,325 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:A 2024-12-07T18:20:58,325 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:20:58,326 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:20:58,326 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 81dd0984f21c5170abf1b07080819b3d/C is initiating minor compaction (all files) 2024-12-07T18:20:58,326 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 81dd0984f21c5170abf1b07080819b3d/C in TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:20:58,326 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/4a629f089dcd49e1a67f5a1d3c8f8002, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5236a2dc612a47d2895a10a00ec81206, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/23732e3102984258a6c01b96f91a928b] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp, totalSize=36.4 K 2024-12-07T18:20:58,327 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a629f089dcd49e1a67f5a1d3c8f8002, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733595652628 2024-12-07T18:20:58,327 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5236a2dc612a47d2895a10a00ec81206, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733595653829 2024-12-07T18:20:58,327 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23732e3102984258a6c01b96f91a928b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733595654474 2024-12-07T18:20:58,336 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 81dd0984f21c5170abf1b07080819b3d#C#compaction#389 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:20:58,337 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/2911104253a44ed28c71b73ed4c991da is 50, key is test_row_0/C:col10/1733595656603/Put/seqid=0 2024-12-07T18:20:58,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742289_1465 (size=12949) 2024-12-07T18:20:58,675 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/baceb9ed2449477c8f0e2cf3a563215d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/baceb9ed2449477c8f0e2cf3a563215d 2024-12-07T18:20:58,679 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/B of 81dd0984f21c5170abf1b07080819b3d into baceb9ed2449477c8f0e2cf3a563215d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:20:58,679 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:58,679 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/B, priority=13, startTime=1733595658256; duration=0sec 2024-12-07T18:20:58,679 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:58,679 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:B 2024-12-07T18:20:58,745 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/2911104253a44ed28c71b73ed4c991da as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/2911104253a44ed28c71b73ed4c991da 2024-12-07T18:20:58,748 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 81dd0984f21c5170abf1b07080819b3d/C of 81dd0984f21c5170abf1b07080819b3d into 2911104253a44ed28c71b73ed4c991da(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:20:58,748 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:20:58,748 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d., storeName=81dd0984f21c5170abf1b07080819b3d/C, priority=13, startTime=1733595658256; duration=0sec 2024-12-07T18:20:58,748 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:20:58,748 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 81dd0984f21c5170abf1b07080819b3d:C 2024-12-07T18:21:00,639 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T18:21:04,307 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/cefb51ff6e58465a8a466042ccce5094, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/2f77bc50c3d6455fb3c8125fa8927973, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/238c907bb2c84e3bb42f157e39169008, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/659123d91f0c425bbf366fae64196e22, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/a17023cf8a124c438a7864bbe7de557d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4fd5a7a098524816a7cb3fc00ee1ad86, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f2c079484f34d918c6bf260076eada2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/1e44fef0da8440c297960450cdec7d4e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3606165ddcdf4dbeafed2c0d2c644426, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/580a22dca33640f19186560d730d2512, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f58737546f54e10b7cdd9a6f9c827f4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/dfff903b378141dbbdb5c50181f82693, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/89f0d080973b43ebb128f48043f101dc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/10a6f873e375422cbebe661e2eae97dd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4eb6849b9a3f4ac593d5ad74e7bb7ba5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/d1ac8d15f6f7438d927b3ef906725ab7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/c4c1e87de63b4bf1b2f0578730b74997, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/7a04830a95b34f088be3b80d8a4172bd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/22996c67ec7f4835aee4caf51ffdc3bc] to archive 2024-12-07T18:21:04,308 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:21:04,309 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/cefb51ff6e58465a8a466042ccce5094 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/cefb51ff6e58465a8a466042ccce5094 2024-12-07T18:21:04,310 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/2f77bc50c3d6455fb3c8125fa8927973 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/2f77bc50c3d6455fb3c8125fa8927973 2024-12-07T18:21:04,311 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/238c907bb2c84e3bb42f157e39169008 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/238c907bb2c84e3bb42f157e39169008 2024-12-07T18:21:04,312 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/659123d91f0c425bbf366fae64196e22 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/659123d91f0c425bbf366fae64196e22 2024-12-07T18:21:04,313 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/a17023cf8a124c438a7864bbe7de557d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/a17023cf8a124c438a7864bbe7de557d 2024-12-07T18:21:04,314 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4fd5a7a098524816a7cb3fc00ee1ad86 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4fd5a7a098524816a7cb3fc00ee1ad86 2024-12-07T18:21:04,314 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f2c079484f34d918c6bf260076eada2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f2c079484f34d918c6bf260076eada2 2024-12-07T18:21:04,315 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/1e44fef0da8440c297960450cdec7d4e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/1e44fef0da8440c297960450cdec7d4e 2024-12-07T18:21:04,316 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3606165ddcdf4dbeafed2c0d2c644426 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3606165ddcdf4dbeafed2c0d2c644426 2024-12-07T18:21:04,317 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/580a22dca33640f19186560d730d2512 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/580a22dca33640f19186560d730d2512 2024-12-07T18:21:04,318 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f58737546f54e10b7cdd9a6f9c827f4 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/3f58737546f54e10b7cdd9a6f9c827f4 2024-12-07T18:21:04,318 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/dfff903b378141dbbdb5c50181f82693 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/dfff903b378141dbbdb5c50181f82693 2024-12-07T18:21:04,319 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/89f0d080973b43ebb128f48043f101dc to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/89f0d080973b43ebb128f48043f101dc 2024-12-07T18:21:04,320 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/10a6f873e375422cbebe661e2eae97dd to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/10a6f873e375422cbebe661e2eae97dd 2024-12-07T18:21:04,321 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4eb6849b9a3f4ac593d5ad74e7bb7ba5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/4eb6849b9a3f4ac593d5ad74e7bb7ba5 2024-12-07T18:21:04,322 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/d1ac8d15f6f7438d927b3ef906725ab7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/d1ac8d15f6f7438d927b3ef906725ab7 2024-12-07T18:21:04,322 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/c4c1e87de63b4bf1b2f0578730b74997 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/c4c1e87de63b4bf1b2f0578730b74997 2024-12-07T18:21:04,323 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/7a04830a95b34f088be3b80d8a4172bd to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/7a04830a95b34f088be3b80d8a4172bd 2024-12-07T18:21:04,324 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/22996c67ec7f4835aee4caf51ffdc3bc to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/22996c67ec7f4835aee4caf51ffdc3bc 2024-12-07T18:21:04,326 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/5df10479964d4daebdf4eb799bd42108, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d4d398f92d764cbeb21bc7f80e99c6bc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/48ca80d9f7e44dd8b46774ebd20266a9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/236abbbb534b4132b2a02de78bc4a1ab, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/27322be48d304ed1a9c15700bfb5d2cf, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/12e735bff11349cd8035a7a19cdecb30, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/7ea6f137993b468e9762afe7cf3bcdbc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/8d5078742f94458394e5be86a761a603, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/2e5ba0836f694bea8766c25ee550b6fc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/109c528ed9cf4136b9407fee2803d70d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/eed0e0bb593d4223aa381880caaa0ea3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/57d931af010a451bafbbd4c5f0296bf5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/a6887d2f87de4c96a3fe2814644dd2e9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/f9e8fe3d100740fa9e3ad1acfce3a4c9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/1b78d15f65794d9b8768abdba7e5af80, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d220d1cede9f4260a824c27c17cbec64, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/6afd25630d71477e827b1cef99577f32, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/42ca1984a71b4708aaccb3e24aca6007, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/9e889bba08324bab92095928fe81130c] to archive 2024-12-07T18:21:04,327 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-07T18:21:04,328 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/5df10479964d4daebdf4eb799bd42108 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/5df10479964d4daebdf4eb799bd42108 2024-12-07T18:21:04,328 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d4d398f92d764cbeb21bc7f80e99c6bc to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d4d398f92d764cbeb21bc7f80e99c6bc 2024-12-07T18:21:04,329 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/48ca80d9f7e44dd8b46774ebd20266a9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/48ca80d9f7e44dd8b46774ebd20266a9 2024-12-07T18:21:04,330 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/236abbbb534b4132b2a02de78bc4a1ab to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/236abbbb534b4132b2a02de78bc4a1ab 2024-12-07T18:21:04,331 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/27322be48d304ed1a9c15700bfb5d2cf to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/27322be48d304ed1a9c15700bfb5d2cf 2024-12-07T18:21:04,332 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/12e735bff11349cd8035a7a19cdecb30 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/12e735bff11349cd8035a7a19cdecb30 2024-12-07T18:21:04,333 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/7ea6f137993b468e9762afe7cf3bcdbc to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/7ea6f137993b468e9762afe7cf3bcdbc 2024-12-07T18:21:04,333 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/8d5078742f94458394e5be86a761a603 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/8d5078742f94458394e5be86a761a603 2024-12-07T18:21:04,334 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/2e5ba0836f694bea8766c25ee550b6fc to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/2e5ba0836f694bea8766c25ee550b6fc 2024-12-07T18:21:04,335 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/109c528ed9cf4136b9407fee2803d70d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/109c528ed9cf4136b9407fee2803d70d 2024-12-07T18:21:04,336 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/eed0e0bb593d4223aa381880caaa0ea3 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/eed0e0bb593d4223aa381880caaa0ea3 2024-12-07T18:21:04,337 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/57d931af010a451bafbbd4c5f0296bf5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/57d931af010a451bafbbd4c5f0296bf5 2024-12-07T18:21:04,337 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/a6887d2f87de4c96a3fe2814644dd2e9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/a6887d2f87de4c96a3fe2814644dd2e9 2024-12-07T18:21:04,338 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/f9e8fe3d100740fa9e3ad1acfce3a4c9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/f9e8fe3d100740fa9e3ad1acfce3a4c9 2024-12-07T18:21:04,339 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/1b78d15f65794d9b8768abdba7e5af80 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/1b78d15f65794d9b8768abdba7e5af80 2024-12-07T18:21:04,339 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d220d1cede9f4260a824c27c17cbec64 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d220d1cede9f4260a824c27c17cbec64 2024-12-07T18:21:04,340 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/6afd25630d71477e827b1cef99577f32 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/6afd25630d71477e827b1cef99577f32 2024-12-07T18:21:04,341 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/42ca1984a71b4708aaccb3e24aca6007 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/42ca1984a71b4708aaccb3e24aca6007 2024-12-07T18:21:04,342 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/9e889bba08324bab92095928fe81130c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/9e889bba08324bab92095928fe81130c 2024-12-07T18:21:04,344 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5bae64496d0b4e8ba52e1ea688c4c0b2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/20566051e8694c469575d562c7af2eae, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7d4326cfceed4328a9c29e1c05ec5370, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d82a7d694a724384b961564ab66dd31e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d8fd566254584a30bc300be1643ca16c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/fee36b812b6c48b484f0e9cb1ccad99d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/477ae2faba7b407a9ceeaf8127abec63, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/4cc6caa674a843b2936b50b1a973eee5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7e704e7bb1af433199382905297a87fe, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/9e8ec299c4b4476f90274858e8dda150, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/ac8307b227914e208a7bc8074017c92e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/8a6d8856764b462abbc2bd39145989be, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/6eff63bdb3884236b5a40d4438be41fa, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7c3c336311d2417fa5e640edc8b484ed, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/6c5d5f8dd07d493b8f0a9390a4c22f1e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/4a629f089dcd49e1a67f5a1d3c8f8002, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d7bee2f63fd34d2f8ffdeb263570b4d1, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5236a2dc612a47d2895a10a00ec81206, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/23732e3102984258a6c01b96f91a928b] to archive 2024-12-07T18:21:04,344 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:21:04,345 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5bae64496d0b4e8ba52e1ea688c4c0b2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5bae64496d0b4e8ba52e1ea688c4c0b2 2024-12-07T18:21:04,346 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/20566051e8694c469575d562c7af2eae to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/20566051e8694c469575d562c7af2eae 2024-12-07T18:21:04,347 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7d4326cfceed4328a9c29e1c05ec5370 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7d4326cfceed4328a9c29e1c05ec5370 2024-12-07T18:21:04,348 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d82a7d694a724384b961564ab66dd31e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d82a7d694a724384b961564ab66dd31e 2024-12-07T18:21:04,348 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d8fd566254584a30bc300be1643ca16c to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d8fd566254584a30bc300be1643ca16c 2024-12-07T18:21:04,349 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/fee36b812b6c48b484f0e9cb1ccad99d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/fee36b812b6c48b484f0e9cb1ccad99d 2024-12-07T18:21:04,350 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/477ae2faba7b407a9ceeaf8127abec63 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/477ae2faba7b407a9ceeaf8127abec63 2024-12-07T18:21:04,351 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/4cc6caa674a843b2936b50b1a973eee5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/4cc6caa674a843b2936b50b1a973eee5 2024-12-07T18:21:04,351 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7e704e7bb1af433199382905297a87fe to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7e704e7bb1af433199382905297a87fe 2024-12-07T18:21:04,352 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/9e8ec299c4b4476f90274858e8dda150 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/9e8ec299c4b4476f90274858e8dda150 2024-12-07T18:21:04,353 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/ac8307b227914e208a7bc8074017c92e to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/ac8307b227914e208a7bc8074017c92e 2024-12-07T18:21:04,354 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/8a6d8856764b462abbc2bd39145989be to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/8a6d8856764b462abbc2bd39145989be 2024-12-07T18:21:04,355 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/6eff63bdb3884236b5a40d4438be41fa to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/6eff63bdb3884236b5a40d4438be41fa 2024-12-07T18:21:04,355 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7c3c336311d2417fa5e640edc8b484ed to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/7c3c336311d2417fa5e640edc8b484ed 2024-12-07T18:21:04,356 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/6c5d5f8dd07d493b8f0a9390a4c22f1e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/6c5d5f8dd07d493b8f0a9390a4c22f1e 2024-12-07T18:21:04,357 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/4a629f089dcd49e1a67f5a1d3c8f8002 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/4a629f089dcd49e1a67f5a1d3c8f8002 2024-12-07T18:21:04,358 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d7bee2f63fd34d2f8ffdeb263570b4d1 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/d7bee2f63fd34d2f8ffdeb263570b4d1 2024-12-07T18:21:04,358 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5236a2dc612a47d2895a10a00ec81206 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/5236a2dc612a47d2895a10a00ec81206 2024-12-07T18:21:04,359 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/8a7a030b35db:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/23732e3102984258a6c01b96f91a928b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/23732e3102984258a6c01b96f91a928b 2024-12-07T18:21:05,253 DEBUG [Thread-1718 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c8cc27b to 127.0.0.1:56016 2024-12-07T18:21:05,253 DEBUG [Thread-1718 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:05,274 DEBUG [Thread-1724 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3268230a to 127.0.0.1:56016 2024-12-07T18:21:05,274 DEBUG [Thread-1724 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:05,290 DEBUG [Thread-1720 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x081cac4f to 127.0.0.1:56016 2024-12-07T18:21:05,290 DEBUG [Thread-1720 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:05,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-07T18:21:05,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 28 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 31 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2287 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6860 rows 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2277 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6831 rows 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2293 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6876 rows 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2296 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6888 rows 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2287 2024-12-07T18:21:05,291 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6861 rows 2024-12-07T18:21:05,291 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-07T18:21:05,291 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2ecf33fc to 127.0.0.1:56016 2024-12-07T18:21:05,291 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:05,293 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-07T18:21:05,293 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-07T18:21:05,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:05,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-07T18:21:05,296 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595665296"}]},"ts":"1733595665296"} 2024-12-07T18:21:05,297 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-07T18:21:05,300 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-07T18:21:05,300 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-07T18:21:05,302 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=81dd0984f21c5170abf1b07080819b3d, UNASSIGN}] 2024-12-07T18:21:05,303 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=81dd0984f21c5170abf1b07080819b3d, UNASSIGN 2024-12-07T18:21:05,303 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=81dd0984f21c5170abf1b07080819b3d, regionState=CLOSING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:05,304 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T18:21:05,304 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; CloseRegionProcedure 81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:21:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-07T18:21:05,455 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:05,456 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(124): Close 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:05,456 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T18:21:05,456 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1681): Closing 81dd0984f21c5170abf1b07080819b3d, disabling compactions & flushes 2024-12-07T18:21:05,456 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:21:05,456 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:21:05,456 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. after waiting 0 ms 2024-12-07T18:21:05,456 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 
2024-12-07T18:21:05,456 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(2837): Flushing 81dd0984f21c5170abf1b07080819b3d 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-07T18:21:05,456 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=A 2024-12-07T18:21:05,456 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:05,456 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=B 2024-12-07T18:21:05,456 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:05,456 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 81dd0984f21c5170abf1b07080819b3d, store=C 2024-12-07T18:21:05,456 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:05,462 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412073d0e027160cc423f997b6f21b62a4582_81dd0984f21c5170abf1b07080819b3d is 50, key is test_row_0/A:col10/1733595665252/Put/seqid=0 2024-12-07T18:21:05,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742290_1466 (size=12454) 2024-12-07T18:21:05,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-07T18:21:05,866 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:05,869 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412073d0e027160cc423f997b6f21b62a4582_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412073d0e027160cc423f997b6f21b62a4582_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:05,870 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/bb6a5a13e8ca4c26af7683d20978aa12, store: [table=TestAcidGuarantees family=A region=81dd0984f21c5170abf1b07080819b3d] 2024-12-07T18:21:05,870 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/bb6a5a13e8ca4c26af7683d20978aa12 is 175, key is test_row_0/A:col10/1733595665252/Put/seqid=0 2024-12-07T18:21:05,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742291_1467 (size=31255) 2024-12-07T18:21:05,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-07T18:21:06,274 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=294, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/bb6a5a13e8ca4c26af7683d20978aa12 2024-12-07T18:21:06,279 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/d6325a34fab544e59653c4a05ed512f3 is 50, key is test_row_0/B:col10/1733595665252/Put/seqid=0 2024-12-07T18:21:06,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742292_1468 (size=12301) 2024-12-07T18:21:06,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-07T18:21:06,683 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/d6325a34fab544e59653c4a05ed512f3 2024-12-07T18:21:06,689 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/67a1a1724d264d84b049398ba2cc0ade is 50, key is test_row_0/C:col10/1733595665252/Put/seqid=0 2024-12-07T18:21:06,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742293_1469 (size=12301) 2024-12-07T18:21:07,092 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=294 (bloomFilter=true), 
to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/67a1a1724d264d84b049398ba2cc0ade 2024-12-07T18:21:07,096 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/A/bb6a5a13e8ca4c26af7683d20978aa12 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/bb6a5a13e8ca4c26af7683d20978aa12 2024-12-07T18:21:07,099 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/bb6a5a13e8ca4c26af7683d20978aa12, entries=150, sequenceid=294, filesize=30.5 K 2024-12-07T18:21:07,099 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/B/d6325a34fab544e59653c4a05ed512f3 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d6325a34fab544e59653c4a05ed512f3 2024-12-07T18:21:07,102 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d6325a34fab544e59653c4a05ed512f3, entries=150, sequenceid=294, filesize=12.0 K 2024-12-07T18:21:07,102 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/.tmp/C/67a1a1724d264d84b049398ba2cc0ade as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/67a1a1724d264d84b049398ba2cc0ade 2024-12-07T18:21:07,105 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/67a1a1724d264d84b049398ba2cc0ade, entries=150, sequenceid=294, filesize=12.0 K 2024-12-07T18:21:07,105 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 81dd0984f21c5170abf1b07080819b3d in 1649ms, sequenceid=294, compaction requested=false 2024-12-07T18:21:07,109 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/recovered.edits/297.seqid, newMaxSeqId=297, maxSeqId=4 2024-12-07T18:21:07,109 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d. 2024-12-07T18:21:07,109 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1635): Region close journal for 81dd0984f21c5170abf1b07080819b3d: 2024-12-07T18:21:07,111 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(170): Closed 81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,111 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=81dd0984f21c5170abf1b07080819b3d, regionState=CLOSED 2024-12-07T18:21:07,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-07T18:21:07,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; CloseRegionProcedure 81dd0984f21c5170abf1b07080819b3d, server=8a7a030b35db,45237,1733595542335 in 1.8080 sec 2024-12-07T18:21:07,114 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-12-07T18:21:07,114 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=81dd0984f21c5170abf1b07080819b3d, UNASSIGN in 1.8110 sec 2024-12-07T18:21:07,115 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-07T18:21:07,115 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8140 sec 2024-12-07T18:21:07,116 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595667116"}]},"ts":"1733595667116"} 2024-12-07T18:21:07,117 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-07T18:21:07,119 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-07T18:21:07,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8260 sec 2024-12-07T18:21:07,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-07T18:21:07,400 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-07T18:21:07,400 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-07T18:21:07,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:07,402 DEBUG [PEWorker-1 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:07,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-07T18:21:07,402 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:07,404 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,405 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/recovered.edits] 2024-12-07T18:21:07,407 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/64de5159849846b9ae428f761b61ced0 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/64de5159849846b9ae428f761b61ced0 2024-12-07T18:21:07,408 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/bb6a5a13e8ca4c26af7683d20978aa12 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/A/bb6a5a13e8ca4c26af7683d20978aa12 2024-12-07T18:21:07,410 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/baceb9ed2449477c8f0e2cf3a563215d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/baceb9ed2449477c8f0e2cf3a563215d 2024-12-07T18:21:07,411 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d6325a34fab544e59653c4a05ed512f3 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/B/d6325a34fab544e59653c4a05ed512f3 2024-12-07T18:21:07,412 DEBUG [HFileArchiver-4 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/2911104253a44ed28c71b73ed4c991da to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/2911104253a44ed28c71b73ed4c991da 2024-12-07T18:21:07,413 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/67a1a1724d264d84b049398ba2cc0ade to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/C/67a1a1724d264d84b049398ba2cc0ade 2024-12-07T18:21:07,415 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/recovered.edits/297.seqid to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d/recovered.edits/297.seqid 2024-12-07T18:21:07,416 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,416 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-07T18:21:07,416 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-07T18:21:07,417 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-07T18:21:07,419 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412071f24c7f0ed3641efa94bc457419088a0_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412071f24c7f0ed3641efa94bc457419088a0_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,420 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412072dd417852fbd434587c6db58d3ed7588_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412072dd417852fbd434587c6db58d3ed7588_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,421 DEBUG [PEWorker-1 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120737541f7690fb416596bd01eb15fa1777_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120737541f7690fb416596bd01eb15fa1777_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,422 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412073d0e027160cc423f997b6f21b62a4582_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412073d0e027160cc423f997b6f21b62a4582_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,423 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207406a4fc2581b4310a75b3637610aabdb_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207406a4fc2581b4310a75b3637610aabdb_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,424 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207435c2a4b3d9743b290da8eeeedb4167e_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207435c2a4b3d9743b290da8eeeedb4167e_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,425 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412077860c7e8d25a49cfb8859328e2812ac1_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412077860c7e8d25a49cfb8859328e2812ac1_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,426 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207813663ac95ae4fb49118d76637dd88ba_81dd0984f21c5170abf1b07080819b3d to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207813663ac95ae4fb49118d76637dd88ba_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,427 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120790464ee84984409fb9c832018015392c_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120790464ee84984409fb9c832018015392c_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,428 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120798113c4bca084ebaa5f76b865c963d3d_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120798113c4bca084ebaa5f76b865c963d3d_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,429 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412079c0a6e52d5714cfd8073aa697e918273_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412079c0a6e52d5714cfd8073aa697e918273_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,430 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207c4b6472e0179448c90f49455b009e6ff_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207c4b6472e0179448c90f49455b009e6ff_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,430 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207cc6108dc707e421fb741a783bb3a5096_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207cc6108dc707e421fb741a783bb3a5096_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,431 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207d356367899a54f9ebdf44d2a263fa6ad_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207d356367899a54f9ebdf44d2a263fa6ad_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,432 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207e010bf8086074bbbbe32843c2bb77c2b_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207e010bf8086074bbbbe32843c2bb77c2b_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,433 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207f66ff8e6654b41ec989da72a5f076b61_81dd0984f21c5170abf1b07080819b3d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207f66ff8e6654b41ec989da72a5f076b61_81dd0984f21c5170abf1b07080819b3d 2024-12-07T18:21:07,434 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-07T18:21:07,436 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:07,437 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-07T18:21:07,439 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-07T18:21:07,439 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:07,439 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-12-07T18:21:07,439 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733595667439"}]},"ts":"9223372036854775807"} 2024-12-07T18:21:07,441 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-07T18:21:07,441 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 81dd0984f21c5170abf1b07080819b3d, NAME => 'TestAcidGuarantees,,1733595631837.81dd0984f21c5170abf1b07080819b3d.', STARTKEY => '', ENDKEY => ''}] 2024-12-07T18:21:07,441 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-07T18:21:07,441 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733595667441"}]},"ts":"9223372036854775807"} 2024-12-07T18:21:07,442 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-07T18:21:07,444 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:07,445 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 44 msec 2024-12-07T18:21:07,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-07T18:21:07,503 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-07T18:21:07,513 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237 (was 239), OpenFileDescriptor=448 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=442 (was 500), ProcessCount=11 (was 11), AvailableMemoryMB=6932 (was 7004) 2024-12-07T18:21:07,522 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=237, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=442, ProcessCount=11, AvailableMemoryMB=6931 2024-12-07T18:21:07,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-07T18:21:07,523 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T18:21:07,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:07,525 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T18:21:07,525 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:07,525 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 131 2024-12-07T18:21:07,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-07T18:21:07,526 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T18:21:07,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742294_1470 (size=963) 2024-12-07T18:21:07,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-07T18:21:07,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-07T18:21:07,932 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7 2024-12-07T18:21:07,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742295_1471 (size=53) 2024-12-07T18:21:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-07T18:21:08,337 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:21:08,338 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing b2a15b0d424567342f9c04e70cd08f88, disabling compactions & flushes 2024-12-07T18:21:08,338 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:08,338 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:08,338 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. after waiting 0 ms 2024-12-07T18:21:08,338 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:08,338 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:08,338 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:08,339 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T18:21:08,339 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733595668339"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733595668339"}]},"ts":"1733595668339"} 2024-12-07T18:21:08,340 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-07T18:21:08,340 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T18:21:08,341 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595668340"}]},"ts":"1733595668340"} 2024-12-07T18:21:08,341 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-07T18:21:08,345 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b2a15b0d424567342f9c04e70cd08f88, ASSIGN}] 2024-12-07T18:21:08,346 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b2a15b0d424567342f9c04e70cd08f88, ASSIGN 2024-12-07T18:21:08,346 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b2a15b0d424567342f9c04e70cd08f88, ASSIGN; state=OFFLINE, location=8a7a030b35db,45237,1733595542335; forceNewPlan=false, retain=false 2024-12-07T18:21:08,497 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=b2a15b0d424567342f9c04e70cd08f88, regionState=OPENING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:08,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; OpenRegionProcedure b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:21:08,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-07T18:21:08,649 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:08,651 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:08,652 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7285): Opening region: {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:21:08,652 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:08,652 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:21:08,652 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7327): checking encryption for b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:08,652 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7330): checking classloading for b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:08,653 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:08,654 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:21:08,654 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b2a15b0d424567342f9c04e70cd08f88 columnFamilyName A 2024-12-07T18:21:08,654 DEBUG [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:08,655 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] regionserver.HStore(327): Store=b2a15b0d424567342f9c04e70cd08f88/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:21:08,655 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:08,655 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:21:08,656 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b2a15b0d424567342f9c04e70cd08f88 columnFamilyName B 2024-12-07T18:21:08,656 DEBUG [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:08,656 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] regionserver.HStore(327): Store=b2a15b0d424567342f9c04e70cd08f88/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:21:08,656 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:08,657 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:21:08,657 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b2a15b0d424567342f9c04e70cd08f88 columnFamilyName C 2024-12-07T18:21:08,657 DEBUG [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:08,657 INFO [StoreOpener-b2a15b0d424567342f9c04e70cd08f88-1 {}] regionserver.HStore(327): Store=b2a15b0d424567342f9c04e70cd08f88/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:21:08,658 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:08,658 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:08,658 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:08,659 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T18:21:08,660 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1085): writing seq id for b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:08,661 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T18:21:08,661 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1102): Opened b2a15b0d424567342f9c04e70cd08f88; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66811156, jitterRate=-0.004436194896697998}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T18:21:08,662 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1001): Region open journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:08,663 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., pid=133, masterSystemTime=1733595668649 2024-12-07T18:21:08,664 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:08,664 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
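[Editor's note] The store openers above report memstore type=CompactingMemStore with compactor=ADAPTIVE, i.e. in-memory compaction is active for families A, B and C. The log does not show how it was enabled here (it may come from site configuration rather than the descriptor); purely as a hedged sketch, the same behaviour can be requested per column family like this, where the family name "A" and the ADAPTIVE policy are taken from the log and the rest is assumed:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionSketch {
  // Build a family descriptor that asks for ADAPTIVE in-memory compaction,
  // matching the "compactor=ADAPTIVE" CompactingMemStore seen above.
  static ColumnFamilyDescriptor familyA() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}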
2024-12-07T18:21:08,664 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=b2a15b0d424567342f9c04e70cd08f88, regionState=OPEN, openSeqNum=2, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:08,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-07T18:21:08,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; OpenRegionProcedure b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 in 167 msec 2024-12-07T18:21:08,667 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=132, resume processing ppid=131 2024-12-07T18:21:08,667 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=131, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b2a15b0d424567342f9c04e70cd08f88, ASSIGN in 321 msec 2024-12-07T18:21:08,667 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T18:21:08,667 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595668667"}]},"ts":"1733595668667"} 2024-12-07T18:21:08,668 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-07T18:21:08,670 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T18:21:08,671 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1470 sec 2024-12-07T18:21:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-07T18:21:09,629 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 131 completed 2024-12-07T18:21:09,630 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4fbee617 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4efaf022 2024-12-07T18:21:09,633 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65036559, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:09,634 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:09,636 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:09,636 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T18:21:09,637 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51330, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T18:21:09,639 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46c37647 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f21f55d 2024-12-07T18:21:09,642 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21f67a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:09,642 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2fb24d40 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f69def6 2024-12-07T18:21:09,645 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d5fe744, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:09,645 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51453050 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60eadae0 2024-12-07T18:21:09,648 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@721d647e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:09,648 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x632d1806 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@55a6e359 2024-12-07T18:21:09,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c014307, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:09,651 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f99adfe to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d47237f 2024-12-07T18:21:09,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b9854ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:09,654 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e690d6 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6b72a92d 2024-12-07T18:21:09,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a4d4e08, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:09,657 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3abeec20 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44fb119b 2024-12-07T18:21:09,663 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44462a02, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:09,663 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x00df2701 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c349948 2024-12-07T18:21:09,666 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69d7a6f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:09,666 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x35ca71a1 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d0c5089 2024-12-07T18:21:09,668 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5938a7c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:09,669 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x56a4483a to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3943c27f 2024-12-07T18:21:09,671 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25593478, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:09,674 DEBUG [hconnection-0x181b269b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:09,675 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:09,675 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56878, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:09,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-07T18:21:09,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 
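[Editor's note] The master records above show the client asking for a table flush ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), which is stored as FlushTableProcedure pid=134 and then polled for completion. From the client side that corresponds to Admin.flush; a minimal sketch, assuming an already open Connection (helper name and structure are the editor's, not the test's):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class FlushSketch {
  // Ask the master to flush every region of the table; the master turns this
  // into a FlushTableProcedure / FlushRegionProcedure pair like pid=134/135 above.
  static void flushTable(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}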
2024-12-07T18:21:09,676 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:09,677 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:09,677 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:09,680 DEBUG [hconnection-0x6ec608b8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:09,681 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56880, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:09,681 DEBUG [hconnection-0x1b6bf802-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:09,681 DEBUG [hconnection-0x59b0813c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:09,681 DEBUG [hconnection-0x3dd5bd86-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:09,681 DEBUG [hconnection-0x2b5e90b2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:09,682 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:09,682 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56904, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:09,682 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56912, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:09,682 DEBUG [hconnection-0x156a5360-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:09,683 DEBUG [hconnection-0xb3f5384-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:09,684 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56946, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:09,684 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56926, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:09,684 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56930, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:09,684 DEBUG [hconnection-0x56cbaf31-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-07T18:21:09,684 DEBUG [hconnection-0x1b9962ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:09,685 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56966, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:09,686 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:09,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:09,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:21:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:09,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/e798c5eb2c8747dd9796092a068d7c9f is 50, key is test_row_0/A:col10/1733595669684/Put/seqid=0 2024-12-07T18:21:09,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:09,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595729724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:09,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595729725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:09,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595729726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:09,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595729728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:09,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595729729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742296_1472 (size=12001) 2024-12-07T18:21:09,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/e798c5eb2c8747dd9796092a068d7c9f 2024-12-07T18:21:09,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/30af0008e9ad4955a3eaac480c51265a is 50, key is test_row_0/B:col10/1733595669684/Put/seqid=0 2024-12-07T18:21:09,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-07T18:21:09,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742297_1473 (size=12001) 2024-12-07T18:21:09,828 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-07T18:21:09,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:09,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:09,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:09,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:09,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
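[Editor's note] The repeated RegionTooBusyException ("Over memstore limit=512.0 K") in the records above and below is the region server pushing back on writers while the memstore flush is still in progress; the stock HBase client treats it as retriable and normally backs off and retries on its own, often surfacing it only wrapped in its own retry exceptions. Purely as an illustrative sketch of that pattern (table name taken from the log; the retry loop, limits and row contents are assumptions, not the test's code), a hand-rolled writer that backs off on this exception might look roughly like:

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionWriter {
  // Retry a put a few times when the region reports it is too busy
  // (memstore above its blocking limit) instead of failing the writer outright.
  static void putWithBackoff(Connection conn, Put put)
      throws IOException, InterruptedException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                    // give up after a few attempts
          }
          Thread.sleep(100L * attempt); // simple linear backoff before retrying
        }
      }
    }
  }
}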
2024-12-07T18:21:09,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:09,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:09,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:09,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595729830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595729831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:09,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:09,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595729831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595729831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:09,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595729836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-07T18:21:09,981 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:09,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-07T18:21:09,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:09,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:09,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:09,982 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:09,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:09,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595730037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595730037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595730037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595730038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595730038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,134 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-07T18:21:10,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:10,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:10,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:10,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/30af0008e9ad4955a3eaac480c51265a 2024-12-07T18:21:10,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/8c2b029dc9a842d7b05901b86b3cefc6 is 50, key is test_row_0/C:col10/1733595669684/Put/seqid=0 2024-12-07T18:21:10,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742298_1474 (size=12001) 2024-12-07T18:21:10,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-07T18:21:10,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-07T18:21:10,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:10,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
as already flushing 2024-12-07T18:21:10,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:10,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595730343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595730344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595730344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595730344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595730345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,440 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-07T18:21:10,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:10,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:10,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:10,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,592 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-07T18:21:10,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:10,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:10,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:10,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:10,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/8c2b029dc9a842d7b05901b86b3cefc6 2024-12-07T18:21:10,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/e798c5eb2c8747dd9796092a068d7c9f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e798c5eb2c8747dd9796092a068d7c9f 2024-12-07T18:21:10,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e798c5eb2c8747dd9796092a068d7c9f, entries=150, sequenceid=12, filesize=11.7 K 2024-12-07T18:21:10,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/30af0008e9ad4955a3eaac480c51265a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/30af0008e9ad4955a3eaac480c51265a 2024-12-07T18:21:10,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/30af0008e9ad4955a3eaac480c51265a, entries=150, sequenceid=12, 
filesize=11.7 K 2024-12-07T18:21:10,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/8c2b029dc9a842d7b05901b86b3cefc6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8c2b029dc9a842d7b05901b86b3cefc6 2024-12-07T18:21:10,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8c2b029dc9a842d7b05901b86b3cefc6, entries=150, sequenceid=12, filesize=11.7 K 2024-12-07T18:21:10,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b2a15b0d424567342f9c04e70cd08f88 in 939ms, sequenceid=12, compaction requested=false 2024-12-07T18:21:10,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:10,745 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,746 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-07T18:21:10,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
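The records above show client Mutate calls to region b2a15b0d424567342f9c04e70cd08f88 being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the region flushes its A/B/C stores, after which the flushed files are committed and a new flush of all 3 column families begins. A minimal Java sketch of that client-side interaction follows; the configuration values, retry loop, and class name are illustrative assumptions and are not taken from this test run (the excerpt does not show the test's actual settings, and the stock client already retries RegionTooBusyException internally).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstorePressureExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The 512 K blocking limit seen in the log is (memstore flush size) x (block multiplier);
        // the values below (128 K x default multiplier 4) are an assumption that would produce it.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row key, family "A", and qualifier "col10" mirror the cells visible in the log.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

          // Explicit retry with backoff, shown only to make the behaviour visible: while the
          // region is over its memstore blocking limit, puts fail until the flush completes.
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }
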
2024-12-07T18:21:10,746 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-07T18:21:10,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:10,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:10,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:10,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:10,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:10,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:10,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/644a3d7a808341d6823130a5d6f3b7f8 is 50, key is test_row_0/A:col10/1733595669727/Put/seqid=0 2024-12-07T18:21:10,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742299_1475 (size=12001) 2024-12-07T18:21:10,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-07T18:21:10,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:10,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:10,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595730854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595730857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595730858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595730859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595730860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595730960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595730963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595730963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595730967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:10,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:10,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595730968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:11,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595731166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:11,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595731167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,171 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/644a3d7a808341d6823130a5d6f3b7f8 2024-12-07T18:21:11,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:11,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595731167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:11,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595731173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:11,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595731173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/0d22cf9b23854c508b2624e63730c6a6 is 50, key is test_row_0/B:col10/1733595669727/Put/seqid=0 2024-12-07T18:21:11,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742300_1476 (size=12001) 2024-12-07T18:21:11,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:11,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595731471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:11,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595731472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:11,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595731473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:11,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595731480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:11,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595731481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,594 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/0d22cf9b23854c508b2624e63730c6a6 2024-12-07T18:21:11,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/cd73b93728e74e0087f63e7e7f979c56 is 50, key is test_row_0/C:col10/1733595669727/Put/seqid=0 2024-12-07T18:21:11,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742301_1477 (size=12001) 2024-12-07T18:21:11,607 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/cd73b93728e74e0087f63e7e7f979c56 2024-12-07T18:21:11,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/644a3d7a808341d6823130a5d6f3b7f8 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/644a3d7a808341d6823130a5d6f3b7f8 2024-12-07T18:21:11,615 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/644a3d7a808341d6823130a5d6f3b7f8, entries=150, sequenceid=37, filesize=11.7 K 2024-12-07T18:21:11,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/0d22cf9b23854c508b2624e63730c6a6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0d22cf9b23854c508b2624e63730c6a6 2024-12-07T18:21:11,619 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0d22cf9b23854c508b2624e63730c6a6, entries=150, sequenceid=37, filesize=11.7 K 2024-12-07T18:21:11,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/cd73b93728e74e0087f63e7e7f979c56 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/cd73b93728e74e0087f63e7e7f979c56 2024-12-07T18:21:11,624 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/cd73b93728e74e0087f63e7e7f979c56, entries=150, sequenceid=37, filesize=11.7 K 2024-12-07T18:21:11,625 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b2a15b0d424567342f9c04e70cd08f88 in 879ms, sequenceid=37, compaction requested=false 2024-12-07T18:21:11,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:11,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:11,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-07T18:21:11,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-07T18:21:11,627 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-07T18:21:11,627 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9490 sec 2024-12-07T18:21:11,629 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.9530 sec 2024-12-07T18:21:11,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-07T18:21:11,780 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-07T18:21:11,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:11,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-07T18:21:11,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T18:21:11,783 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:11,784 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:11,784 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:11,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T18:21:11,936 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:11,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-07T18:21:11,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:11,936 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:21:11,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:11,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:11,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:11,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:11,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:11,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:11,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/0909dac0ebbe4d2cb708808006706e87 is 50, key is test_row_0/A:col10/1733595670852/Put/seqid=0 2024-12-07T18:21:11,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742302_1478 (size=12001) 2024-12-07T18:21:11,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:11,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:12,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595732012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595732014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595732040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595732040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595732041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T18:21:12,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595732141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595732141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595732150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595732150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595732150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595732345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595732346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,354 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/0909dac0ebbe4d2cb708808006706e87 2024-12-07T18:21:12,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595732353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595732354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/2b0925e4d5234c3ea0b50310d1f03e9d is 50, key is test_row_0/B:col10/1733595670852/Put/seqid=0 2024-12-07T18:21:12,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742303_1479 (size=12001) 2024-12-07T18:21:12,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595732362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T18:21:12,634 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T18:21:12,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595732652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595732653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595732661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595732662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:12,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595732670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:12,765 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/2b0925e4d5234c3ea0b50310d1f03e9d 2024-12-07T18:21:12,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/95fdfd801cd64688abb88d6e701c5938 is 50, key is test_row_0/C:col10/1733595670852/Put/seqid=0 2024-12-07T18:21:12,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742304_1480 (size=12001) 2024-12-07T18:21:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T18:21:13,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595733160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:13,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595733161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:13,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595733168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:13,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595733171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:13,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595733175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:13,189 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/95fdfd801cd64688abb88d6e701c5938 2024-12-07T18:21:13,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/0909dac0ebbe4d2cb708808006706e87 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/0909dac0ebbe4d2cb708808006706e87 2024-12-07T18:21:13,197 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/0909dac0ebbe4d2cb708808006706e87, entries=150, sequenceid=48, filesize=11.7 K 2024-12-07T18:21:13,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/2b0925e4d5234c3ea0b50310d1f03e9d as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2b0925e4d5234c3ea0b50310d1f03e9d 2024-12-07T18:21:13,202 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2b0925e4d5234c3ea0b50310d1f03e9d, entries=150, sequenceid=48, filesize=11.7 K 2024-12-07T18:21:13,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/95fdfd801cd64688abb88d6e701c5938 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/95fdfd801cd64688abb88d6e701c5938 2024-12-07T18:21:13,206 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/95fdfd801cd64688abb88d6e701c5938, entries=150, sequenceid=48, filesize=11.7 K 2024-12-07T18:21:13,206 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for b2a15b0d424567342f9c04e70cd08f88 in 1270ms, sequenceid=48, compaction requested=true 2024-12-07T18:21:13,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:13,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:13,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-07T18:21:13,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-07T18:21:13,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-07T18:21:13,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4240 sec 2024-12-07T18:21:13,210 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.4280 sec 2024-12-07T18:21:13,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T18:21:13,887 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-07T18:21:13,888 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:13,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-07T18:21:13,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-07T18:21:13,889 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:13,890 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:13,890 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:13,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-07T18:21:14,042 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-07T18:21:14,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:14,043 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-07T18:21:14,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:14,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:14,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:14,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:14,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:14,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:14,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/6da7c82f5ced464085972eb5451ad99b is 50, key is test_row_0/A:col10/1733595672011/Put/seqid=0 2024-12-07T18:21:14,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742305_1481 (size=12001) 2024-12-07T18:21:14,054 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/6da7c82f5ced464085972eb5451ad99b 2024-12-07T18:21:14,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/816bd78ced3e4fcfbf18739030f42e69 is 50, key is test_row_0/B:col10/1733595672011/Put/seqid=0 2024-12-07T18:21:14,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742306_1482 (size=12001) 2024-12-07T18:21:14,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:14,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
as already flushing 2024-12-07T18:21:14,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595734181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595734182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595734183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595734183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595734185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-07T18:21:14,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595734288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595734289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595734290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595734290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,474 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/816bd78ced3e4fcfbf18739030f42e69 2024-12-07T18:21:14,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/a0796de97ab94bc2b9ee24ae0749ee7a is 50, key is test_row_0/C:col10/1733595672011/Put/seqid=0 2024-12-07T18:21:14,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742307_1483 (size=12001) 2024-12-07T18:21:14,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-07T18:21:14,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595734497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595734498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595734498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595734499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595734801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595734801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595734803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:14,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595734806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:14,886 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/a0796de97ab94bc2b9ee24ae0749ee7a 2024-12-07T18:21:14,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/6da7c82f5ced464085972eb5451ad99b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/6da7c82f5ced464085972eb5451ad99b 2024-12-07T18:21:14,894 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/6da7c82f5ced464085972eb5451ad99b, entries=150, sequenceid=74, filesize=11.7 K 2024-12-07T18:21:14,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/816bd78ced3e4fcfbf18739030f42e69 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/816bd78ced3e4fcfbf18739030f42e69 2024-12-07T18:21:14,899 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/816bd78ced3e4fcfbf18739030f42e69, entries=150, sequenceid=74, filesize=11.7 K 2024-12-07T18:21:14,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/a0796de97ab94bc2b9ee24ae0749ee7a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/a0796de97ab94bc2b9ee24ae0749ee7a 2024-12-07T18:21:14,913 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/a0796de97ab94bc2b9ee24ae0749ee7a, entries=150, sequenceid=74, filesize=11.7 K 2024-12-07T18:21:14,914 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for b2a15b0d424567342f9c04e70cd08f88 in 871ms, sequenceid=74, compaction requested=true 2024-12-07T18:21:14,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:14,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:14,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-07T18:21:14,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-07T18:21:14,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-07T18:21:14,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0250 sec 2024-12-07T18:21:14,918 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.0290 sec 2024-12-07T18:21:14,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-07T18:21:14,993 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-07T18:21:14,994 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:14,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-12-07T18:21:14,995 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:14,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-07T18:21:14,996 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:14,996 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:15,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-07T18:21:15,147 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-07T18:21:15,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:15,148 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-07T18:21:15,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:15,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:15,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:15,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:15,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:15,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:15,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/21322693ec5b48958007cf74f2967b55 is 50, key is test_row_0/A:col10/1733595674182/Put/seqid=0 2024-12-07T18:21:15,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742308_1484 (size=12001) 2024-12-07T18:21:15,157 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/21322693ec5b48958007cf74f2967b55 2024-12-07T18:21:15,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/8b39fd1c343444c0a69f28d15f864d03 is 50, key is test_row_0/B:col10/1733595674182/Put/seqid=0 2024-12-07T18:21:15,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742309_1485 (size=12001) 2024-12-07T18:21:15,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-07T18:21:15,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
as already flushing 2024-12-07T18:21:15,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:15,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:15,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595735354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:15,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595735355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:15,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595735359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595735362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:15,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595735463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:15,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595735463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:15,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595735466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:15,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595735472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,566 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/8b39fd1c343444c0a69f28d15f864d03 2024-12-07T18:21:15,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/c1923b3719a149d598e5308e9318a76b is 50, key is test_row_0/C:col10/1733595674182/Put/seqid=0 2024-12-07T18:21:15,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742310_1486 (size=12001) 2024-12-07T18:21:15,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-07T18:21:15,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:15,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595735670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:15,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595735670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:15,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595735670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:15,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:15,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595735676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:15,978 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/c1923b3719a149d598e5308e9318a76b
2024-12-07T18:21:15,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/21322693ec5b48958007cf74f2967b55 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/21322693ec5b48958007cf74f2967b55
2024-12-07T18:21:15,985 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/21322693ec5b48958007cf74f2967b55, entries=150, sequenceid=84, filesize=11.7 K
2024-12-07T18:21:15,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:15,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595735981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:15,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/8b39fd1c343444c0a69f28d15f864d03 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/8b39fd1c343444c0a69f28d15f864d03
2024-12-07T18:21:15,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:15,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595735981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:15,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:15,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595735981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:15,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:15,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595735982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:15,989 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/8b39fd1c343444c0a69f28d15f864d03, entries=150, sequenceid=84, filesize=11.7 K
2024-12-07T18:21:15,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/c1923b3719a149d598e5308e9318a76b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c1923b3719a149d598e5308e9318a76b
2024-12-07T18:21:15,993 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c1923b3719a149d598e5308e9318a76b, entries=150, sequenceid=84, filesize=11.7 K
2024-12-07T18:21:15,993 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for b2a15b0d424567342f9c04e70cd08f88 in 845ms, sequenceid=84, compaction requested=true
2024-12-07T18:21:15,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88:
2024-12-07T18:21:15,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.
2024-12-07T18:21:15,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141
2024-12-07T18:21:15,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=141
2024-12-07T18:21:15,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140
2024-12-07T18:21:15,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 998 msec
2024-12-07T18:21:15,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.0020 sec
2024-12-07T18:21:16,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140
2024-12-07T18:21:16,099 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed
2024-12-07T18:21:16,100 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-07T18:21:16,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees
2024-12-07T18:21:16,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142
2024-12-07T18:21:16,103 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-07T18:21:16,104 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-07T18:21:16,104 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-07T18:21:16,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88
2024-12-07T18:21:16,194 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB
2024-12-07T18:21:16,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A
2024-12-07T18:21:16,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:21:16,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B
2024-12-07T18:21:16,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:21:16,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C
2024-12-07T18:21:16,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:21:16,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/2089d527ea6248b594e94bd51c9cfb77 is 50, key is test_row_0/A:col10/1733595676193/Put/seqid=0
2024-12-07T18:21:16,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142
2024-12-07T18:21:16,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742311_1487 (size=14341)
2024-12-07T18:21:16,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/2089d527ea6248b594e94bd51c9cfb77
2024-12-07T18:21:16,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/ec515b7d0ea3476bab771c96ece70183 is 50, key is test_row_0/B:col10/1733595676193/Put/seqid=0
2024-12-07T18:21:16,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742312_1488 (size=12001)
2024-12-07T18:21:16,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:16,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595736239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:16,255 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335
2024-12-07T18:21:16,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143
2024-12-07T18:21:16,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.
2024-12-07T18:21:16,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing
2024-12-07T18:21:16,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.
2024-12-07T18:21:16,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143
java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T18:21:16,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143
java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T18:21:16,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=143
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T18:21:16,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:16,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595736349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:16,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142
2024-12-07T18:21:16,408 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335
2024-12-07T18:21:16,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143
2024-12-07T18:21:16,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.
2024-12-07T18:21:16,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing
2024-12-07T18:21:16,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.
2024-12-07T18:21:16,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143
java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T18:21:16,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143
java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T18:21:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=143
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T18:21:16,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595736488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:16,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:16,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595736489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:16,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:16,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595736489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:16,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:16,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595736492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:16,560 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335
2024-12-07T18:21:16,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143
2024-12-07T18:21:16,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.
2024-12-07T18:21:16,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing
2024-12-07T18:21:16,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.
2024-12-07T18:21:16,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143
java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T18:21:16,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143
java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T18:21:16,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-07T18:21:16,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595736556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335
2024-12-07T18:21:16,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=143
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T18:21:16,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/ec515b7d0ea3476bab771c96ece70183
2024-12-07T18:21:16,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/f40fb889c61d455688fc106a592baaf8 is 50, key is test_row_0/C:col10/1733595676193/Put/seqid=0
2024-12-07T18:21:16,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742313_1489 (size=12001)
2024-12-07T18:21:16,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/f40fb889c61d455688fc106a592baaf8
2024-12-07T18:21:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/2089d527ea6248b594e94bd51c9cfb77 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/2089d527ea6248b594e94bd51c9cfb77
2024-12-07T18:21:16,641 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/2089d527ea6248b594e94bd51c9cfb77, entries=200, sequenceid=111, filesize=14.0 K
2024-12-07T18:21:16,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/ec515b7d0ea3476bab771c96ece70183 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ec515b7d0ea3476bab771c96ece70183
2024-12-07T18:21:16,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ec515b7d0ea3476bab771c96ece70183, entries=150, sequenceid=111, filesize=11.7 K
2024-12-07T18:21:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/f40fb889c61d455688fc106a592baaf8 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/f40fb889c61d455688fc106a592baaf8
2024-12-07T18:21:16,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/f40fb889c61d455688fc106a592baaf8, entries=150, sequenceid=111, filesize=11.7 K
2024-12-07T18:21:16,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for b2a15b0d424567342f9c04e70cd08f88 in 462ms, sequenceid=111, compaction requested=true
2024-12-07T18:21:16,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88:
2024-12-07T18:21:16,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:A, priority=-2147483648, current under compaction store size is 1
2024-12-07T18:21:16,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-07T18:21:16,656 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking
2024-12-07T18:21:16,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:B, priority=-2147483648, current under compaction store size is 2
2024-12-07T18:21:16,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-07T18:21:16,656 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking
2024-12-07T18:21:16,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:C, priority=-2147483648, current under compaction store size is 3
2024-12-07T18:21:16,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-07T18:21:16,657 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 74346 starting at candidate #0 after considering 10 permutations with 10 in ratio
2024-12-07T18:21:16,657 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72006 starting at candidate #0 after considering 10 permutations with 10 in ratio
2024-12-07T18:21:16,657 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/A is initiating minor compaction (all files)
2024-12-07T18:21:16,657 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/B is initiating minor compaction (all files)
2024-12-07T18:21:16,658 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/A in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.
2024-12-07T18:21:16,658 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/B in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.
2024-12-07T18:21:16,658 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e798c5eb2c8747dd9796092a068d7c9f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/644a3d7a808341d6823130a5d6f3b7f8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/0909dac0ebbe4d2cb708808006706e87, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/6da7c82f5ced464085972eb5451ad99b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/21322693ec5b48958007cf74f2967b55, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/2089d527ea6248b594e94bd51c9cfb77] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=72.6 K
2024-12-07T18:21:16,658 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/30af0008e9ad4955a3eaac480c51265a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0d22cf9b23854c508b2624e63730c6a6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2b0925e4d5234c3ea0b50310d1f03e9d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/816bd78ced3e4fcfbf18739030f42e69, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/8b39fd1c343444c0a69f28d15f864d03, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ec515b7d0ea3476bab771c96ece70183] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=70.3 K
2024-12-07T18:21:16,658 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting e798c5eb2c8747dd9796092a068d7c9f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733595669680
2024-12-07T18:21:16,658 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 30af0008e9ad4955a3eaac480c51265a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733595669680
2024-12-07T18:21:16,658 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 644a3d7a808341d6823130a5d6f3b7f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733595669722
2024-12-07T18:21:16,658 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d22cf9b23854c508b2624e63730c6a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733595669722
2024-12-07T18:21:16,658 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0909dac0ebbe4d2cb708808006706e87, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1733595670852
2024-12-07T18:21:16,659 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b0925e4d5234c3ea0b50310d1f03e9d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1733595670852
2024-12-07T18:21:16,659 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 816bd78ced3e4fcfbf18739030f42e69, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733595672011
2024-12-07T18:21:16,659 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6da7c82f5ced464085972eb5451ad99b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733595672011
2024-12-07T18:21:16,659 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21322693ec5b48958007cf74f2967b55, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1733595674181
2024-12-07T18:21:16,659 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b39fd1c343444c0a69f28d15f864d03, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1733595674181
2024-12-07T18:21:16,659 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2089d527ea6248b594e94bd51c9cfb77, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1733595675346
2024-12-07T18:21:16,659 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ec515b7d0ea3476bab771c96ece70183, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1733595675346
2024-12-07T18:21:16,674 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#A#compaction#411 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-07T18:21:16,675 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/fc6cc90ce30145448cd919988dda5d00 is 50, key is test_row_0/A:col10/1733595676193/Put/seqid=0
2024-12-07T18:21:16,683 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#B#compaction#412 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-07T18:21:16,684 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/6202f403c8d640e6a4ba445f8104c2ae is 50, key is test_row_0/B:col10/1733595676193/Put/seqid=0
2024-12-07T18:21:16,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742314_1490 (size=12207)
2024-12-07T18:21:16,695 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/fc6cc90ce30145448cd919988dda5d00 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/fc6cc90ce30145448cd919988dda5d00
2024-12-07T18:21:16,701 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/A of b2a15b0d424567342f9c04e70cd08f88 into fc6cc90ce30145448cd919988dda5d00(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-07T18:21:16,701 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88:
2024-12-07T18:21:16,701 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/A, priority=10, startTime=1733595676656; duration=0sec
2024-12-07T18:21:16,701 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-07T18:21:16,701 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:A
2024-12-07T18:21:16,701 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking
2024-12-07T18:21:16,703 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72006 starting at candidate #0 after considering 10 permutations with 10 in ratio
2024-12-07T18:21:16,703 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/C is initiating minor compaction (all files)
2024-12-07T18:21:16,703 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/C in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.
2024-12-07T18:21:16,703 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8c2b029dc9a842d7b05901b86b3cefc6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/cd73b93728e74e0087f63e7e7f979c56, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/95fdfd801cd64688abb88d6e701c5938, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/a0796de97ab94bc2b9ee24ae0749ee7a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c1923b3719a149d598e5308e9318a76b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/f40fb889c61d455688fc106a592baaf8] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=70.3 K
2024-12-07T18:21:16,703 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c2b029dc9a842d7b05901b86b3cefc6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733595669680
2024-12-07T18:21:16,704 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}]
compactions.Compactor(224): Compacting cd73b93728e74e0087f63e7e7f979c56, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733595669722 2024-12-07T18:21:16,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-07T18:21:16,704 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95fdfd801cd64688abb88d6e701c5938, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1733595670852 2024-12-07T18:21:16,704 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0796de97ab94bc2b9ee24ae0749ee7a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733595672011 2024-12-07T18:21:16,705 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1923b3719a149d598e5308e9318a76b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1733595674181 2024-12-07T18:21:16,705 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting f40fb889c61d455688fc106a592baaf8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1733595675346 2024-12-07T18:21:16,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742315_1491 (size=12207) 2024-12-07T18:21:16,713 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/6202f403c8d640e6a4ba445f8104c2ae as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/6202f403c8d640e6a4ba445f8104c2ae 2024-12-07T18:21:16,713 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:16,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-07T18:21:16,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:16,714 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-07T18:21:16,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:16,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:16,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:16,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:16,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:16,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:16,719 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/B of b2a15b0d424567342f9c04e70cd08f88 into 6202f403c8d640e6a4ba445f8104c2ae(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:16,719 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:16,719 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/B, priority=10, startTime=1733595676656; duration=0sec 2024-12-07T18:21:16,719 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:16,719 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:B 2024-12-07T18:21:16,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/4e2cc6b221da41778ee121e46bfb2780 is 50, key is test_row_0/A:col10/1733595676220/Put/seqid=0 2024-12-07T18:21:16,723 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#C#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:16,724 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/79590bf70020415bbf3cae5302b10360 is 50, key is test_row_0/C:col10/1733595676193/Put/seqid=0 2024-12-07T18:21:16,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742317_1493 (size=12207) 2024-12-07T18:21:16,755 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/79590bf70020415bbf3cae5302b10360 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/79590bf70020415bbf3cae5302b10360 2024-12-07T18:21:16,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742316_1492 (size=12001) 2024-12-07T18:21:16,761 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/C of b2a15b0d424567342f9c04e70cd08f88 into 79590bf70020415bbf3cae5302b10360(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:16,761 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:16,761 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/C, priority=10, startTime=1733595676656; duration=0sec 2024-12-07T18:21:16,761 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:16,761 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:C 2024-12-07T18:21:16,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:16,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:16,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:16,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595736968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:17,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:17,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595737074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:17,160 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/4e2cc6b221da41778ee121e46bfb2780 2024-12-07T18:21:17,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/bb17ac27f9f94bfc9fb59f52b5c921e7 is 50, key is test_row_0/B:col10/1733595676220/Put/seqid=0 2024-12-07T18:21:17,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742318_1494 (size=12001) 2024-12-07T18:21:17,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-07T18:21:17,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:17,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595737279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:17,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:17,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595737495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:17,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:17,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595737496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:17,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:17,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595737497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:17,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:17,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595737502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:17,571 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/bb17ac27f9f94bfc9fb59f52b5c921e7 2024-12-07T18:21:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/ece21e4f170646d5ad0c9abe56e69b90 is 50, key is test_row_0/C:col10/1733595676220/Put/seqid=0 2024-12-07T18:21:17,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742319_1495 (size=12001) 2024-12-07T18:21:17,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:17,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595737583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:17,981 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/ece21e4f170646d5ad0c9abe56e69b90 2024-12-07T18:21:17,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/4e2cc6b221da41778ee121e46bfb2780 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4e2cc6b221da41778ee121e46bfb2780 2024-12-07T18:21:17,988 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4e2cc6b221da41778ee121e46bfb2780, entries=150, sequenceid=121, filesize=11.7 K 2024-12-07T18:21:17,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/bb17ac27f9f94bfc9fb59f52b5c921e7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bb17ac27f9f94bfc9fb59f52b5c921e7 2024-12-07T18:21:17,992 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bb17ac27f9f94bfc9fb59f52b5c921e7, entries=150, sequenceid=121, filesize=11.7 K 2024-12-07T18:21:17,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 
{event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/ece21e4f170646d5ad0c9abe56e69b90 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ece21e4f170646d5ad0c9abe56e69b90 2024-12-07T18:21:17,995 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ece21e4f170646d5ad0c9abe56e69b90, entries=150, sequenceid=121, filesize=11.7 K 2024-12-07T18:21:17,996 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for b2a15b0d424567342f9c04e70cd08f88 in 1282ms, sequenceid=121, compaction requested=false 2024-12-07T18:21:17,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:17,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:17,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-07T18:21:17,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-07T18:21:17,998 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-07T18:21:17,998 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8930 sec 2024-12-07T18:21:17,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.8980 sec 2024-12-07T18:21:18,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:18,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-07T18:21:18,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:18,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:18,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:18,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:18,094 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:18,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:18,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/e9f5cd96c7db48188aeba3df25d6205a is 50, key is test_row_0/A:col10/1733595676961/Put/seqid=0 2024-12-07T18:21:18,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742320_1496 (size=14541) 2024-12-07T18:21:18,112 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/e9f5cd96c7db48188aeba3df25d6205a 2024-12-07T18:21:18,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/abb6d2a6c43c44318633cddda2e41642 is 50, key is test_row_0/B:col10/1733595676961/Put/seqid=0 2024-12-07T18:21:18,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:18,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595738117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:18,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742321_1497 (size=12151) 2024-12-07T18:21:18,125 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/abb6d2a6c43c44318633cddda2e41642 2024-12-07T18:21:18,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/55281eb4b7ba4ea1b437b0f81ea1a19e is 50, key is test_row_0/C:col10/1733595676961/Put/seqid=0 2024-12-07T18:21:18,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742322_1498 (size=12151) 2024-12-07T18:21:18,137 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/55281eb4b7ba4ea1b437b0f81ea1a19e 2024-12-07T18:21:18,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/e9f5cd96c7db48188aeba3df25d6205a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e9f5cd96c7db48188aeba3df25d6205a 2024-12-07T18:21:18,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e9f5cd96c7db48188aeba3df25d6205a, entries=200, sequenceid=151, filesize=14.2 K 2024-12-07T18:21:18,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/abb6d2a6c43c44318633cddda2e41642 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/abb6d2a6c43c44318633cddda2e41642 2024-12-07T18:21:18,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/abb6d2a6c43c44318633cddda2e41642, entries=150, sequenceid=151, filesize=11.9 K 2024-12-07T18:21:18,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/55281eb4b7ba4ea1b437b0f81ea1a19e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/55281eb4b7ba4ea1b437b0f81ea1a19e 2024-12-07T18:21:18,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/55281eb4b7ba4ea1b437b0f81ea1a19e, entries=150, sequenceid=151, filesize=11.9 K 2024-12-07T18:21:18,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for b2a15b0d424567342f9c04e70cd08f88 in 67ms, sequenceid=151, compaction requested=true 2024-12-07T18:21:18,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:18,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:18,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:18,161 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:18,161 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:18,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:18,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:18,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:18,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:18,162 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 38749 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:18,162 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/A is initiating minor compaction (all files) 2024-12-07T18:21:18,162 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/A in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:18,162 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/fc6cc90ce30145448cd919988dda5d00, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4e2cc6b221da41778ee121e46bfb2780, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e9f5cd96c7db48188aeba3df25d6205a] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=37.8 K 2024-12-07T18:21:18,163 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:18,163 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/B is initiating minor compaction (all files) 2024-12-07T18:21:18,163 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/B in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:18,163 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/6202f403c8d640e6a4ba445f8104c2ae, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bb17ac27f9f94bfc9fb59f52b5c921e7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/abb6d2a6c43c44318633cddda2e41642] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=35.5 K 2024-12-07T18:21:18,164 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc6cc90ce30145448cd919988dda5d00, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1733595675346 2024-12-07T18:21:18,164 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 6202f403c8d640e6a4ba445f8104c2ae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1733595675346 2024-12-07T18:21:18,164 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e2cc6b221da41778ee121e46bfb2780, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1733595676220 2024-12-07T18:21:18,164 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting bb17ac27f9f94bfc9fb59f52b5c921e7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1733595676220 2024-12-07T18:21:18,164 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting abb6d2a6c43c44318633cddda2e41642, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1733595676940 2024-12-07T18:21:18,164 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9f5cd96c7db48188aeba3df25d6205a, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1733595676940 2024-12-07T18:21:18,178 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#A#compaction#420 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:18,178 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/5396969203f541a8aecfc9ae3f12c9a5 is 50, key is test_row_0/A:col10/1733595676961/Put/seqid=0 2024-12-07T18:21:18,179 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#B#compaction#421 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:18,179 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/48ab0739cef743dbbd741e528c1d82dd is 50, key is test_row_0/B:col10/1733595676961/Put/seqid=0 2024-12-07T18:21:18,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742324_1500 (size=12459) 2024-12-07T18:21:18,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742323_1499 (size=12459) 2024-12-07T18:21:18,199 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/5396969203f541a8aecfc9ae3f12c9a5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/5396969203f541a8aecfc9ae3f12c9a5 2024-12-07T18:21:18,205 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/A of b2a15b0d424567342f9c04e70cd08f88 into 5396969203f541a8aecfc9ae3f12c9a5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:18,205 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:18,205 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/A, priority=13, startTime=1733595678161; duration=0sec 2024-12-07T18:21:18,205 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:18,205 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:A 2024-12-07T18:21:18,205 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:18,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-07T18:21:18,205 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-07T18:21:18,207 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:18,207 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/C is initiating minor compaction (all files) 2024-12-07T18:21:18,207 
INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:18,207 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/C in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:18,207 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/79590bf70020415bbf3cae5302b10360, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ece21e4f170646d5ad0c9abe56e69b90, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/55281eb4b7ba4ea1b437b0f81ea1a19e] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=35.5 K 2024-12-07T18:21:18,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-12-07T18:21:18,208 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79590bf70020415bbf3cae5302b10360, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1733595675346 2024-12-07T18:21:18,208 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:18,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T18:21:18,209 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:18,209 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:18,209 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting ece21e4f170646d5ad0c9abe56e69b90, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1733595676220 2024-12-07T18:21:18,210 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55281eb4b7ba4ea1b437b0f81ea1a19e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1733595676940 2024-12-07T18:21:18,222 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#C#compaction#422 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:18,223 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/4a345eec47cd4a8fb627418bfe464041 is 50, key is test_row_0/C:col10/1733595676961/Put/seqid=0 2024-12-07T18:21:18,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742325_1501 (size=12459) 2024-12-07T18:21:18,240 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/4a345eec47cd4a8fb627418bfe464041 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4a345eec47cd4a8fb627418bfe464041 2024-12-07T18:21:18,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:18,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:21:18,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:18,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:18,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:18,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:18,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:18,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:18,246 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/C of b2a15b0d424567342f9c04e70cd08f88 into 4a345eec47cd4a8fb627418bfe464041(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
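The FLUSH operation recorded above (Client=jenkins//172.17.0.2 flush of TestAcidGuarantees, stored as FlushTableProcedure pid=144 with a FlushRegionProcedure subprocedure pid=145) is the server-side trace of an Admin flush call. A minimal client-side sketch of such a call follows; the connection setup and quorum address are assumptions for illustration, and only Admin.flush(TableName) corresponds to what the log records.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed quorum address; the mini-cluster in this log listens on ephemeral ports.
        conf.set("hbase.zookeeper.quorum", "localhost");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a flush of the table's regions. On the master this surfaces as a
          // FlushTableProcedure (pid=144 above) fanning out FlushRegionProcedure calls.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The repeated pid=145 failures that follow ("NOT flushing ... as already flushing" and "Unable to complete flush") mean the region was already mid-flush when the procedure arrived; the log shows the master re-dispatching the subprocedure several times until the in-progress flush finishes.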
2024-12-07T18:21:18,246 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:18,246 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/C, priority=13, startTime=1733595678161; duration=0sec 2024-12-07T18:21:18,246 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:18,246 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:C 2024-12-07T18:21:18,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/d74ae3d1e7a0407b90948424901de5f3 is 50, key is test_row_0/A:col10/1733595678243/Put/seqid=0 2024-12-07T18:21:18,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742326_1502 (size=14541) 2024-12-07T18:21:18,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/d74ae3d1e7a0407b90948424901de5f3 2024-12-07T18:21:18,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/bcd181b9204f46a7bc1bf759e5531756 is 50, key is test_row_0/B:col10/1733595678243/Put/seqid=0 2024-12-07T18:21:18,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742327_1503 (size=12151) 2024-12-07T18:21:18,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T18:21:18,361 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:18,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-07T18:21:18,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:18,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
as already flushing 2024-12-07T18:21:18,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:18,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:18,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595738370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:18,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:18,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595738476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T18:21:18,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:18,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-07T18:21:18,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:18,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:18,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:18,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
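The RegionTooBusyException warnings above come from HRegion.checkResources: once the region's memstore grows past its blocking limit (reported here as 512.0 K), new mutations are rejected until a flush brings the size back down. That blocking limit is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the specific values this test run uses are not visible in this excerpt. On the client side the rejections are not fatal; the RPC caller retries them with backoff (the "Call exception, tries=6, retries=16" entry further down). A hedged sketch of the client-side knobs involved, using illustrative values rather than the ones this test run uses:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ClientRetryTuning {
      // Returns a client Configuration with the retry-related settings made explicit.
      public static Configuration tunedClientConf() {
        Configuration conf = HBaseConfiguration.create();
        // Retries before an operation fails; the caller in this log reports retries=16.
        conf.setInt("hbase.client.retries.number", 16);
        // Base pause between retries in ms; actual waits grow with the backoff schedule.
        conf.setLong("hbase.client.pause", 100);
        // Overall cap in ms on how long one operation may keep retrying (illustrative value).
        conf.setLong("hbase.client.operation.timeout", 60000);
        return conf;
      }
    }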
2024-12-07T18:21:18,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,589 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/48ab0739cef743dbbd741e528c1d82dd as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/48ab0739cef743dbbd741e528c1d82dd 2024-12-07T18:21:18,592 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/B of b2a15b0d424567342f9c04e70cd08f88 into 48ab0739cef743dbbd741e528c1d82dd(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:18,592 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:18,592 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/B, priority=13, startTime=1733595678161; duration=0sec 2024-12-07T18:21:18,592 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:18,592 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:B 2024-12-07T18:21:18,667 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:18,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-07T18:21:18,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:18,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:18,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
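At this point all three stores (A, B and C) of b2a15b0d424567342f9c04e70cd08f88 have been minor-compacted, each from three store files totalling roughly 35.5 K into a single 12.2 K file, and the PressureAwareThroughputController lines show the compaction writers being rate-limited ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second"). A sketch of the server-side settings that govern that limiter follows; the key names are given as assumptions to be verified against the HBase version in use, and the bounds shown simply mirror the 50 MB/s limit reported in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputTuning {
      // Returns a region-server Configuration with the pressure-aware compaction
      // throughput bounds made explicit (values are bytes per second).
      public static Configuration tunedServerConf() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names for the pressure-aware limiter; verify against the running version.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        return conf;
      }
    }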
2024-12-07T18:21:18,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,682 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/bcd181b9204f46a7bc1bf759e5531756 2024-12-07T18:21:18,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:18,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595738679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:18,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/8cdc1ff0e9e14099bdae517fb7e5114b is 50, key is test_row_0/C:col10/1733595678243/Put/seqid=0 2024-12-07T18:21:18,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742328_1504 (size=12151) 2024-12-07T18:21:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T18:21:18,820 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:18,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-07T18:21:18,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:18,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:18,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:18,821 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,973 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:18,973 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-07T18:21:18,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:18,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:18,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:18,974 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:18,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595738986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,093 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/8cdc1ff0e9e14099bdae517fb7e5114b 2024-12-07T18:21:19,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/d74ae3d1e7a0407b90948424901de5f3 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/d74ae3d1e7a0407b90948424901de5f3 2024-12-07T18:21:19,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/d74ae3d1e7a0407b90948424901de5f3, entries=200, sequenceid=163, filesize=14.2 K 2024-12-07T18:21:19,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/bcd181b9204f46a7bc1bf759e5531756 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bcd181b9204f46a7bc1bf759e5531756 2024-12-07T18:21:19,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bcd181b9204f46a7bc1bf759e5531756, entries=150, sequenceid=163, filesize=11.9 K 2024-12-07T18:21:19,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/8cdc1ff0e9e14099bdae517fb7e5114b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8cdc1ff0e9e14099bdae517fb7e5114b 2024-12-07T18:21:19,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8cdc1ff0e9e14099bdae517fb7e5114b, entries=150, sequenceid=163, filesize=11.9 K 2024-12-07T18:21:19,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b2a15b0d424567342f9c04e70cd08f88 in 865ms, sequenceid=163, compaction requested=false 2024-12-07T18:21:19,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:19,126 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-07T18:21:19,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:19,126 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-07T18:21:19,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:19,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:19,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:19,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:19,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:19,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:19,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/a8d05c7b5e244696b45eb85d254efdc9 is 50, key is test_row_0/A:col10/1733595678340/Put/seqid=0 2024-12-07T18:21:19,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742329_1505 (size=12151) 2024-12-07T18:21:19,135 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/a8d05c7b5e244696b45eb85d254efdc9 2024-12-07T18:21:19,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/41856ea46a754b938600b972fca6d71e is 50, key is test_row_0/B:col10/1733595678340/Put/seqid=0 2024-12-07T18:21:19,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742330_1506 (size=12151) 2024-12-07T18:21:19,145 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/41856ea46a754b938600b972fca6d71e 2024-12-07T18:21:19,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/759ab423d84445ed83f695b72f4706d3 is 50, key is test_row_0/C:col10/1733595678340/Put/seqid=0 2024-12-07T18:21:19,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742331_1507 (size=12151) 2024-12-07T18:21:19,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T18:21:19,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:19,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:19,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595739515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595739518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595739519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595739520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595739526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,536 DEBUG [Thread-2095 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4181 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., hostname=8a7a030b35db,45237,1733595542335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:21:19,574 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/759ab423d84445ed83f695b72f4706d3 2024-12-07T18:21:19,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/a8d05c7b5e244696b45eb85d254efdc9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/a8d05c7b5e244696b45eb85d254efdc9 2024-12-07T18:21:19,581 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/a8d05c7b5e244696b45eb85d254efdc9, entries=150, sequenceid=190, filesize=11.9 K 2024-12-07T18:21:19,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/41856ea46a754b938600b972fca6d71e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/41856ea46a754b938600b972fca6d71e 2024-12-07T18:21:19,584 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/41856ea46a754b938600b972fca6d71e, entries=150, sequenceid=190, filesize=11.9 K 2024-12-07T18:21:19,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/759ab423d84445ed83f695b72f4706d3 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/759ab423d84445ed83f695b72f4706d3 2024-12-07T18:21:19,587 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/759ab423d84445ed83f695b72f4706d3, entries=150, sequenceid=190, filesize=11.9 K 2024-12-07T18:21:19,588 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] 
regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b2a15b0d424567342f9c04e70cd08f88 in 462ms, sequenceid=190, compaction requested=true 2024-12-07T18:21:19,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:19,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:19,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-07T18:21:19,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-07T18:21:19,591 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-07T18:21:19,591 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3800 sec 2024-12-07T18:21:19,592 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.3840 sec 2024-12-07T18:21:19,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:19,625 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-07T18:21:19,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:19,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:19,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:19,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:19,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:19,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:19,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/4826ae27aa03474985dad7b2396fcf19 is 50, key is test_row_0/A:col10/1733595679623/Put/seqid=0 2024-12-07T18:21:19,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742332_1508 (size=14541) 2024-12-07T18:21:19,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595739663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595739663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595739664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595739665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595739774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595739774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595739774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595739775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595739979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595739979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595739979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:19,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:19,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595739979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/4826ae27aa03474985dad7b2396fcf19 2024-12-07T18:21:20,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/c1bb5fea423346fd9013473be3120f7e is 50, key is test_row_0/B:col10/1733595679623/Put/seqid=0 2024-12-07T18:21:20,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742333_1509 (size=12151) 2024-12-07T18:21:20,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:20,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595740286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:20,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595740286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:20,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595740286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:20,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595740290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T18:21:20,312 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-07T18:21:20,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:20,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-12-07T18:21:20,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-07T18:21:20,315 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:20,316 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:20,316 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:20,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-07T18:21:20,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/c1bb5fea423346fd9013473be3120f7e 2024-12-07T18:21:20,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/3c4310754680496e9bf1ceb8e4798a13 is 50, key is test_row_0/C:col10/1733595679623/Put/seqid=0 
2024-12-07T18:21:20,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742334_1510 (size=12151) 2024-12-07T18:21:20,467 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-07T18:21:20,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:20,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:20,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:20,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:20,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:20,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:20,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-07T18:21:20,620 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,620 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-07T18:21:20,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:20,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:20,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:20,621 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:20,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:20,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:20,773 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-07T18:21:20,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:20,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
as already flushing 2024-12-07T18:21:20,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:20,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:20,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:20,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:20,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:20,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:20,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595740792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595740792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:20,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595740792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:20,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595740795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/3c4310754680496e9bf1ceb8e4798a13 2024-12-07T18:21:20,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/4826ae27aa03474985dad7b2396fcf19 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4826ae27aa03474985dad7b2396fcf19 2024-12-07T18:21:20,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4826ae27aa03474985dad7b2396fcf19, entries=200, sequenceid=202, filesize=14.2 K 2024-12-07T18:21:20,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/c1bb5fea423346fd9013473be3120f7e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/c1bb5fea423346fd9013473be3120f7e 2024-12-07T18:21:20,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/c1bb5fea423346fd9013473be3120f7e, entries=150, sequenceid=202, filesize=11.9 K 2024-12-07T18:21:20,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/3c4310754680496e9bf1ceb8e4798a13 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/3c4310754680496e9bf1ceb8e4798a13 2024-12-07T18:21:20,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/3c4310754680496e9bf1ceb8e4798a13, entries=150, sequenceid=202, filesize=11.9 K 2024-12-07T18:21:20,874 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for b2a15b0d424567342f9c04e70cd08f88 in 1250ms, sequenceid=202, compaction requested=true 2024-12-07T18:21:20,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:20,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:20,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:20,875 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:21:20,875 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:21:20,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:20,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:20,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:20,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:20,876 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53692 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:21:20,876 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/A is initiating minor compaction (all files) 2024-12-07T18:21:20,876 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/A in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:20,876 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/5396969203f541a8aecfc9ae3f12c9a5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/d74ae3d1e7a0407b90948424901de5f3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/a8d05c7b5e244696b45eb85d254efdc9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4826ae27aa03474985dad7b2396fcf19] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=52.4 K 2024-12-07T18:21:20,877 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5396969203f541a8aecfc9ae3f12c9a5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1733595676940 2024-12-07T18:21:20,877 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting d74ae3d1e7a0407b90948424901de5f3, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733595678115 2024-12-07T18:21:20,877 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:21:20,877 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/B is initiating minor compaction (all files) 2024-12-07T18:21:20,877 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8d05c7b5e244696b45eb85d254efdc9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733595678340 2024-12-07T18:21:20,877 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/B in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:20,877 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/48ab0739cef743dbbd741e528c1d82dd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bcd181b9204f46a7bc1bf759e5531756, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/41856ea46a754b938600b972fca6d71e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/c1bb5fea423346fd9013473be3120f7e] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=47.8 K 2024-12-07T18:21:20,878 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4826ae27aa03474985dad7b2396fcf19, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733595679507 2024-12-07T18:21:20,878 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 48ab0739cef743dbbd741e528c1d82dd, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1733595676940 2024-12-07T18:21:20,878 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting bcd181b9204f46a7bc1bf759e5531756, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733595678115 2024-12-07T18:21:20,878 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 41856ea46a754b938600b972fca6d71e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733595678340 2024-12-07T18:21:20,879 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting c1bb5fea423346fd9013473be3120f7e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733595679507 2024-12-07T18:21:20,887 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#A#compaction#432 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:20,888 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/8c4b246da80549dd8a3f350541e44ad0 is 50, key is test_row_0/A:col10/1733595679623/Put/seqid=0 2024-12-07T18:21:20,891 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#B#compaction#433 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:20,891 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/659642dc2b3a4da086c8b0a9ecf7212b is 50, key is test_row_0/B:col10/1733595679623/Put/seqid=0 2024-12-07T18:21:20,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742335_1511 (size=12595) 2024-12-07T18:21:20,902 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/8c4b246da80549dd8a3f350541e44ad0 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/8c4b246da80549dd8a3f350541e44ad0 2024-12-07T18:21:20,907 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/A of b2a15b0d424567342f9c04e70cd08f88 into 8c4b246da80549dd8a3f350541e44ad0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:20,907 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:20,908 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/A, priority=12, startTime=1733595680875; duration=0sec 2024-12-07T18:21:20,908 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:20,908 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:A 2024-12-07T18:21:20,908 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-07T18:21:20,909 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-07T18:21:20,909 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/C is initiating minor compaction (all files) 2024-12-07T18:21:20,909 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/C in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:20,909 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4a345eec47cd4a8fb627418bfe464041, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8cdc1ff0e9e14099bdae517fb7e5114b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/759ab423d84445ed83f695b72f4706d3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/3c4310754680496e9bf1ceb8e4798a13] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=47.8 K 2024-12-07T18:21:20,909 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a345eec47cd4a8fb627418bfe464041, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1733595676940 2024-12-07T18:21:20,910 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8cdc1ff0e9e14099bdae517fb7e5114b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733595678115 2024-12-07T18:21:20,910 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 759ab423d84445ed83f695b72f4706d3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733595678340 2024-12-07T18:21:20,910 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c4310754680496e9bf1ceb8e4798a13, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733595679507 2024-12-07T18:21:20,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742336_1512 (size=12595) 2024-12-07T18:21:20,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-07T18:21:20,921 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/659642dc2b3a4da086c8b0a9ecf7212b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/659642dc2b3a4da086c8b0a9ecf7212b 2024-12-07T18:21:20,923 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#C#compaction#434 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:20,924 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/e738b5dd0926494aa3f26c73582f13c2 is 50, key is test_row_0/C:col10/1733595679623/Put/seqid=0 2024-12-07T18:21:20,926 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:20,926 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/B of b2a15b0d424567342f9c04e70cd08f88 into 659642dc2b3a4da086c8b0a9ecf7212b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:20,926 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:20,926 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/B, priority=12, startTime=1733595680875; duration=0sec 2024-12-07T18:21:20,926 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:20,926 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:B 2024-12-07T18:21:20,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-07T18:21:20,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:20,927 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-07T18:21:20,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:20,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:20,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:20,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:20,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:20,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:20,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/1227a80d00d34e09b5efa9d2c0972888 is 50, key is test_row_0/A:col10/1733595679663/Put/seqid=0 2024-12-07T18:21:20,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742337_1513 (size=12595) 2024-12-07T18:21:20,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742338_1514 (size=12151) 2024-12-07T18:21:21,375 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/e738b5dd0926494aa3f26c73582f13c2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/e738b5dd0926494aa3f26c73582f13c2 2024-12-07T18:21:21,379 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/C of b2a15b0d424567342f9c04e70cd08f88 into e738b5dd0926494aa3f26c73582f13c2(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:21,379 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:21,379 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/C, priority=12, startTime=1733595680876; duration=0sec 2024-12-07T18:21:21,379 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:21,379 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:C 2024-12-07T18:21:21,386 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/1227a80d00d34e09b5efa9d2c0972888 2024-12-07T18:21:21,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/4ea8e405b15a4bfab29b3f16f3a25d9b is 50, key is test_row_0/B:col10/1733595679663/Put/seqid=0 2024-12-07T18:21:21,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742339_1515 (size=12151) 2024-12-07T18:21:21,399 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/4ea8e405b15a4bfab29b3f16f3a25d9b 2024-12-07T18:21:21,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/c3c5d2da4bfa4cc7ab7acf4165dda763 is 50, key is test_row_0/C:col10/1733595679663/Put/seqid=0 2024-12-07T18:21:21,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742340_1516 (size=12151) 2024-12-07T18:21:21,414 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/c3c5d2da4bfa4cc7ab7acf4165dda763 2024-12-07T18:21:21,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 
2024-12-07T18:21:21,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/1227a80d00d34e09b5efa9d2c0972888 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1227a80d00d34e09b5efa9d2c0972888 2024-12-07T18:21:21,425 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1227a80d00d34e09b5efa9d2c0972888, entries=150, sequenceid=229, filesize=11.9 K 2024-12-07T18:21:21,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/4ea8e405b15a4bfab29b3f16f3a25d9b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/4ea8e405b15a4bfab29b3f16f3a25d9b 2024-12-07T18:21:21,430 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/4ea8e405b15a4bfab29b3f16f3a25d9b, entries=150, sequenceid=229, filesize=11.9 K 2024-12-07T18:21:21,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/c3c5d2da4bfa4cc7ab7acf4165dda763 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c3c5d2da4bfa4cc7ab7acf4165dda763 2024-12-07T18:21:21,435 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c3c5d2da4bfa4cc7ab7acf4165dda763, entries=150, sequenceid=229, filesize=11.9 K 2024-12-07T18:21:21,436 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for b2a15b0d424567342f9c04e70cd08f88 in 510ms, sequenceid=229, compaction requested=false 2024-12-07T18:21:21,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:21,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:21,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-12-07T18:21:21,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-12-07T18:21:21,438 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-07T18:21:21,438 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1210 sec 2024-12-07T18:21:21,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.1250 sec 2024-12-07T18:21:21,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:21,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-07T18:21:21,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:21,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:21,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:21,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:21,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:21,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:21,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/4fb3588c68694a549b238c6470357f16 is 50, key is test_row_0/A:col10/1733595681814/Put/seqid=0 2024-12-07T18:21:21,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742341_1517 (size=16931) 2024-12-07T18:21:21,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:21,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595741851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:21,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:21,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595741858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:21,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:21,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595741858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:21,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:21,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595741861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:21,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:21,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595741962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:21,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:21,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595741968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:21,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:21,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595741968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:21,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:21,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595741971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:22,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:22,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595742165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:22,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:22,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595742174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:22,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:22,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595742175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:22,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:22,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595742176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:22,224 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/4fb3588c68694a549b238c6470357f16 2024-12-07T18:21:22,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/682bf4b6fafd43ebb7ce8b78419bfea2 is 50, key is test_row_0/B:col10/1733595681814/Put/seqid=0 2024-12-07T18:21:22,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742342_1518 (size=12151) 2024-12-07T18:21:22,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-07T18:21:22,420 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-12-07T18:21:22,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:22,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-12-07T18:21:22,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-07T18:21:22,423 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:22,423 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:22,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-07T18:21:22,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:22,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595742471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:22,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:22,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595742478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:22,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:22,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595742478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:22,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:22,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595742482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:22,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-07T18:21:22,574 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:22,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-07T18:21:22,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:22,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:22,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:22,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:22,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:22,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:22,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/682bf4b6fafd43ebb7ce8b78419bfea2 2024-12-07T18:21:22,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/c240dcdc900d4e61bff5cb77bb989905 is 50, key is test_row_0/C:col10/1733595681814/Put/seqid=0 2024-12-07T18:21:22,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742343_1519 (size=12151) 2024-12-07T18:21:22,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/c240dcdc900d4e61bff5cb77bb989905 2024-12-07T18:21:22,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/4fb3588c68694a549b238c6470357f16 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4fb3588c68694a549b238c6470357f16 2024-12-07T18:21:22,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4fb3588c68694a549b238c6470357f16, entries=250, sequenceid=242, filesize=16.5 K 2024-12-07T18:21:22,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/682bf4b6fafd43ebb7ce8b78419bfea2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/682bf4b6fafd43ebb7ce8b78419bfea2 2024-12-07T18:21:22,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/682bf4b6fafd43ebb7ce8b78419bfea2, entries=150, sequenceid=242, filesize=11.9 K 
2024-12-07T18:21:22,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/c240dcdc900d4e61bff5cb77bb989905 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c240dcdc900d4e61bff5cb77bb989905 2024-12-07T18:21:22,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c240dcdc900d4e61bff5cb77bb989905, entries=150, sequenceid=242, filesize=11.9 K 2024-12-07T18:21:22,664 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b2a15b0d424567342f9c04e70cd08f88 in 848ms, sequenceid=242, compaction requested=true 2024-12-07T18:21:22,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:22,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:22,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:22,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:22,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:22,664 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:22,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:22,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-07T18:21:22,664 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:22,665 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:22,665 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/B is initiating minor compaction (all files) 2024-12-07T18:21:22,665 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/B in 
TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:22,665 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/659642dc2b3a4da086c8b0a9ecf7212b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/4ea8e405b15a4bfab29b3f16f3a25d9b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/682bf4b6fafd43ebb7ce8b78419bfea2] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=36.0 K 2024-12-07T18:21:22,665 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:22,665 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/A is initiating minor compaction (all files) 2024-12-07T18:21:22,665 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/A in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:22,665 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/8c4b246da80549dd8a3f350541e44ad0, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1227a80d00d34e09b5efa9d2c0972888, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4fb3588c68694a549b238c6470357f16] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=40.7 K 2024-12-07T18:21:22,665 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c4b246da80549dd8a3f350541e44ad0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733595679507 2024-12-07T18:21:22,665 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 659642dc2b3a4da086c8b0a9ecf7212b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733595679507 2024-12-07T18:21:22,666 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1227a80d00d34e09b5efa9d2c0972888, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733595679662 2024-12-07T18:21:22,666 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ea8e405b15a4bfab29b3f16f3a25d9b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733595679662 
2024-12-07T18:21:22,666 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fb3588c68694a549b238c6470357f16, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733595681809 2024-12-07T18:21:22,666 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 682bf4b6fafd43ebb7ce8b78419bfea2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733595681809 2024-12-07T18:21:22,675 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#B#compaction#441 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:22,675 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#A#compaction#442 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:22,675 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/2b712731248d445297cbf739a542cacb is 50, key is test_row_0/B:col10/1733595681814/Put/seqid=0 2024-12-07T18:21:22,676 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/40b12b12dc2e4c778f8570b83c54a3fb is 50, key is test_row_0/A:col10/1733595681814/Put/seqid=0 2024-12-07T18:21:22,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742344_1520 (size=12697) 2024-12-07T18:21:22,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742345_1521 (size=12697) 2024-12-07T18:21:22,698 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/2b712731248d445297cbf739a542cacb as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2b712731248d445297cbf739a542cacb 2024-12-07T18:21:22,704 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/B of b2a15b0d424567342f9c04e70cd08f88 into 2b712731248d445297cbf739a542cacb(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:22,704 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:22,704 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/B, priority=13, startTime=1733595682664; duration=0sec 2024-12-07T18:21:22,704 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:22,704 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:B 2024-12-07T18:21:22,704 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:22,706 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:22,706 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/C is initiating minor compaction (all files) 2024-12-07T18:21:22,706 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/C in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:22,706 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/e738b5dd0926494aa3f26c73582f13c2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c3c5d2da4bfa4cc7ab7acf4165dda763, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c240dcdc900d4e61bff5cb77bb989905] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=36.0 K 2024-12-07T18:21:22,706 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting e738b5dd0926494aa3f26c73582f13c2, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733595679507 2024-12-07T18:21:22,707 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting c3c5d2da4bfa4cc7ab7acf4165dda763, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733595679662 2024-12-07T18:21:22,707 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting c240dcdc900d4e61bff5cb77bb989905, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733595681809 2024-12-07T18:21:22,722 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b2a15b0d424567342f9c04e70cd08f88#C#compaction#443 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:22,723 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/1144c91ca3c64e258d76b415b0170f97 is 50, key is test_row_0/C:col10/1733595681814/Put/seqid=0 2024-12-07T18:21:22,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-07T18:21:22,726 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:22,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-07T18:21:22,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:22,727 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-07T18:21:22,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:22,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:22,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:22,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:22,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:22,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:22,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742346_1522 (size=12697) 2024-12-07T18:21:22,746 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/1144c91ca3c64e258d76b415b0170f97 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/1144c91ca3c64e258d76b415b0170f97 2024-12-07T18:21:22,750 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/C of b2a15b0d424567342f9c04e70cd08f88 into 1144c91ca3c64e258d76b415b0170f97(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:22,750 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:22,750 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/C, priority=13, startTime=1733595682664; duration=0sec 2024-12-07T18:21:22,751 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:22,751 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:C 2024-12-07T18:21:22,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/3eaaaacfb8914b308e764d53e372332a is 50, key is test_row_0/A:col10/1733595681859/Put/seqid=0 2024-12-07T18:21:22,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742347_1523 (size=12301) 2024-12-07T18:21:22,760 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=267 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/3eaaaacfb8914b308e764d53e372332a 2024-12-07T18:21:22,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/2d857f6225144f31a8919ec25a9d636a is 50, key is test_row_0/B:col10/1733595681859/Put/seqid=0 2024-12-07T18:21:22,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742348_1524 (size=12301) 2024-12-07T18:21:22,779 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=267 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/2d857f6225144f31a8919ec25a9d636a 2024-12-07T18:21:22,790 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/4ade2ca5230d4de2ac7da424eac20d04 is 50, key is test_row_0/C:col10/1733595681859/Put/seqid=0 2024-12-07T18:21:22,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742349_1525 (size=12301) 2024-12-07T18:21:22,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:22,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:23,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595742993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595742994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595742995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595743001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-07T18:21:23,102 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/40b12b12dc2e4c778f8570b83c54a3fb as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/40b12b12dc2e4c778f8570b83c54a3fb 2024-12-07T18:21:23,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595743103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595743103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595743103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595743112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,116 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/A of b2a15b0d424567342f9c04e70cd08f88 into 40b12b12dc2e4c778f8570b83c54a3fb(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:23,116 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:23,116 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/A, priority=13, startTime=1733595682664; duration=0sec 2024-12-07T18:21:23,117 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:23,117 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:A 2024-12-07T18:21:23,208 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=267 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/4ade2ca5230d4de2ac7da424eac20d04 2024-12-07T18:21:23,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/3eaaaacfb8914b308e764d53e372332a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/3eaaaacfb8914b308e764d53e372332a 2024-12-07T18:21:23,214 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/3eaaaacfb8914b308e764d53e372332a, 
entries=150, sequenceid=267, filesize=12.0 K 2024-12-07T18:21:23,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/2d857f6225144f31a8919ec25a9d636a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2d857f6225144f31a8919ec25a9d636a 2024-12-07T18:21:23,218 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2d857f6225144f31a8919ec25a9d636a, entries=150, sequenceid=267, filesize=12.0 K 2024-12-07T18:21:23,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/4ade2ca5230d4de2ac7da424eac20d04 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4ade2ca5230d4de2ac7da424eac20d04 2024-12-07T18:21:23,222 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4ade2ca5230d4de2ac7da424eac20d04, entries=150, sequenceid=267, filesize=12.0 K 2024-12-07T18:21:23,223 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for b2a15b0d424567342f9c04e70cd08f88 in 496ms, sequenceid=267, compaction requested=false 2024-12-07T18:21:23,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:23,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:23,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-12-07T18:21:23,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-12-07T18:21:23,225 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-07T18:21:23,225 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 801 msec 2024-12-07T18:21:23,226 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 805 msec 2024-12-07T18:21:23,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:23,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-07T18:21:23,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:23,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:23,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:23,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:23,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:23,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:23,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/c851f670429949cc8cfc2ce623a395cc is 50, key is test_row_0/A:col10/1733595683310/Put/seqid=0 2024-12-07T18:21:23,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742350_1526 (size=14741) 2024-12-07T18:21:23,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595743345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595743346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595743348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595743349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595743449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595743451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595743454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595743456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-07T18:21:23,526 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-12-07T18:21:23,527 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:23,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees 2024-12-07T18:21:23,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-07T18:21:23,529 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:23,529 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:23,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:23,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56926 deadline: 1733595743573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,578 DEBUG [Thread-2095 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8223 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., hostname=8a7a030b35db,45237,1733595542335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:21:23,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 
2024-12-07T18:21:23,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595743655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595743656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595743658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595743664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,681 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-07T18:21:23,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:23,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:23,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:23,682 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:23,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:23,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:23,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/c851f670429949cc8cfc2ce623a395cc 2024-12-07T18:21:23,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/993bed6fe6a644ceac1d5454d09d0ffc is 50, key is test_row_0/B:col10/1733595683310/Put/seqid=0 2024-12-07T18:21:23,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742351_1527 (size=12301) 2024-12-07T18:21:23,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-07T18:21:23,834 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-07T18:21:23,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:23,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:23,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:23,834 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:23,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:23,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595743961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595743963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595743964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:23,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595743970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,986 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:23,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-07T18:21:23,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:23,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:23,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:23,987 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:23,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:23,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:24,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-07T18:21:24,139 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:24,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-07T18:21:24,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:24,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:24,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:24,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:24,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:24,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:24,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/993bed6fe6a644ceac1d5454d09d0ffc 2024-12-07T18:21:24,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/b48c915b9c0d48ed8845895ca975869d is 50, key is test_row_0/C:col10/1733595683310/Put/seqid=0 2024-12-07T18:21:24,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742352_1528 (size=12301) 2024-12-07T18:21:24,292 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:24,292 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-07T18:21:24,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:24,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:24,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:24,292 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:24,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:24,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:24,444 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:24,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-07T18:21:24,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:24,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:24,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:24,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:24,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:24,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:24,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:24,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595744467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:24,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:24,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595744471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:24,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:24,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595744474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:24,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595744480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:24,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/b48c915b9c0d48ed8845895ca975869d 2024-12-07T18:21:24,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/c851f670429949cc8cfc2ce623a395cc as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/c851f670429949cc8cfc2ce623a395cc 2024-12-07T18:21:24,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/c851f670429949cc8cfc2ce623a395cc, entries=200, sequenceid=284, filesize=14.4 K 2024-12-07T18:21:24,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/993bed6fe6a644ceac1d5454d09d0ffc as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/993bed6fe6a644ceac1d5454d09d0ffc 2024-12-07T18:21:24,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/993bed6fe6a644ceac1d5454d09d0ffc, entries=150, sequenceid=284, filesize=12.0 K 2024-12-07T18:21:24,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/b48c915b9c0d48ed8845895ca975869d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/b48c915b9c0d48ed8845895ca975869d 2024-12-07T18:21:24,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/b48c915b9c0d48ed8845895ca975869d, entries=150, sequenceid=284, filesize=12.0 K 2024-12-07T18:21:24,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for b2a15b0d424567342f9c04e70cd08f88 in 1259ms, sequenceid=284, compaction requested=true 2024-12-07T18:21:24,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:24,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:24,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:24,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:24,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:24,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:24,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-07T18:21:24,570 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:24,570 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:24,571 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:24,571 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/B is initiating minor compaction (all files) 2024-12-07T18:21:24,571 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/B in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:24,572 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2b712731248d445297cbf739a542cacb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2d857f6225144f31a8919ec25a9d636a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/993bed6fe6a644ceac1d5454d09d0ffc] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=36.4 K 2024-12-07T18:21:24,572 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39739 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:24,572 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/A is initiating minor compaction (all files) 2024-12-07T18:21:24,572 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/A in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:24,572 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/40b12b12dc2e4c778f8570b83c54a3fb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/3eaaaacfb8914b308e764d53e372332a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/c851f670429949cc8cfc2ce623a395cc] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=38.8 K 2024-12-07T18:21:24,572 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40b12b12dc2e4c778f8570b83c54a3fb, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733595681809 2024-12-07T18:21:24,572 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b712731248d445297cbf739a542cacb, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733595681809 2024-12-07T18:21:24,573 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3eaaaacfb8914b308e764d53e372332a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1733595681849 2024-12-07T18:21:24,573 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d857f6225144f31a8919ec25a9d636a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1733595681849 2024-12-07T18:21:24,573 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting c851f670429949cc8cfc2ce623a395cc, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733595682986 2024-12-07T18:21:24,573 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 993bed6fe6a644ceac1d5454d09d0ffc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733595683000 2024-12-07T18:21:24,591 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#A#compaction#450 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:24,592 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/19244919906e45978297b1c9f923086d is 50, key is test_row_0/A:col10/1733595683310/Put/seqid=0 2024-12-07T18:21:24,594 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#B#compaction#451 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:24,595 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/3753a76f94a4441784ce694790dbc661 is 50, key is test_row_0/B:col10/1733595683310/Put/seqid=0 2024-12-07T18:21:24,597 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:24,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-07T18:21:24,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:24,598 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-07T18:21:24,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:24,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:24,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:24,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:24,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:24,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:24,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742353_1529 (size=12949) 2024-12-07T18:21:24,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/75646edf3f3f43d4be9ce7d61f1eee30 is 50, key is test_row_0/A:col10/1733595683347/Put/seqid=0 2024-12-07T18:21:24,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742354_1530 (size=12949) 2024-12-07T18:21:24,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-07T18:21:24,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742355_1531 (size=12301) 2024-12-07T18:21:25,007 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/19244919906e45978297b1c9f923086d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/19244919906e45978297b1c9f923086d 2024-12-07T18:21:25,010 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/A of b2a15b0d424567342f9c04e70cd08f88 into 19244919906e45978297b1c9f923086d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:25,010 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:25,010 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/A, priority=13, startTime=1733595684570; duration=0sec 2024-12-07T18:21:25,010 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:25,010 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:A 2024-12-07T18:21:25,010 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:25,011 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:25,011 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/C is initiating minor compaction (all files) 2024-12-07T18:21:25,011 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/C in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:25,011 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/1144c91ca3c64e258d76b415b0170f97, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4ade2ca5230d4de2ac7da424eac20d04, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/b48c915b9c0d48ed8845895ca975869d] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=36.4 K 2024-12-07T18:21:25,012 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1144c91ca3c64e258d76b415b0170f97, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733595681809 2024-12-07T18:21:25,012 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ade2ca5230d4de2ac7da424eac20d04, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1733595681849 2024-12-07T18:21:25,012 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting b48c915b9c0d48ed8845895ca975869d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733595683000 2024-12-07T18:21:25,019 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#C#compaction#453 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:25,020 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/6ef5b3a29cdf4c33a13ef464755b7adb is 50, key is test_row_0/C:col10/1733595683310/Put/seqid=0 2024-12-07T18:21:25,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742356_1532 (size=12949) 2024-12-07T18:21:25,033 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/3753a76f94a4441784ce694790dbc661 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/3753a76f94a4441784ce694790dbc661 2024-12-07T18:21:25,033 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/6ef5b3a29cdf4c33a13ef464755b7adb as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/6ef5b3a29cdf4c33a13ef464755b7adb 2024-12-07T18:21:25,038 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/B of b2a15b0d424567342f9c04e70cd08f88 into 3753a76f94a4441784ce694790dbc661(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:25,038 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:25,038 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/B, priority=13, startTime=1733595684570; duration=0sec 2024-12-07T18:21:25,038 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:25,038 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:B 2024-12-07T18:21:25,039 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/75646edf3f3f43d4be9ce7d61f1eee30 2024-12-07T18:21:25,040 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/C of b2a15b0d424567342f9c04e70cd08f88 into 6ef5b3a29cdf4c33a13ef464755b7adb(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:25,040 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:25,040 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/C, priority=13, startTime=1733595684570; duration=0sec 2024-12-07T18:21:25,040 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:25,040 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:C 2024-12-07T18:21:25,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/d4a22ad2bda546d69142e4ef5791b971 is 50, key is test_row_0/B:col10/1733595683347/Put/seqid=0 2024-12-07T18:21:25,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742357_1533 (size=12301) 2024-12-07T18:21:25,050 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=306 (bloomFilter=true), 
to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/d4a22ad2bda546d69142e4ef5791b971 2024-12-07T18:21:25,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/1a156616305442a4ae6f831258ce3cde is 50, key is test_row_0/C:col10/1733595683347/Put/seqid=0 2024-12-07T18:21:25,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742358_1534 (size=12301) 2024-12-07T18:21:25,080 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/1a156616305442a4ae6f831258ce3cde 2024-12-07T18:21:25,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/75646edf3f3f43d4be9ce7d61f1eee30 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/75646edf3f3f43d4be9ce7d61f1eee30 2024-12-07T18:21:25,087 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/75646edf3f3f43d4be9ce7d61f1eee30, entries=150, sequenceid=306, filesize=12.0 K 2024-12-07T18:21:25,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/d4a22ad2bda546d69142e4ef5791b971 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/d4a22ad2bda546d69142e4ef5791b971 2024-12-07T18:21:25,090 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/d4a22ad2bda546d69142e4ef5791b971, entries=150, sequenceid=306, filesize=12.0 K 2024-12-07T18:21:25,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/1a156616305442a4ae6f831258ce3cde as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/1a156616305442a4ae6f831258ce3cde 2024-12-07T18:21:25,093 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/1a156616305442a4ae6f831258ce3cde, entries=150, sequenceid=306, filesize=12.0 K 2024-12-07T18:21:25,094 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for b2a15b0d424567342f9c04e70cd08f88 in 496ms, sequenceid=306, compaction requested=false 2024-12-07T18:21:25,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:25,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:25,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-12-07T18:21:25,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-12-07T18:21:25,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-07T18:21:25,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5670 sec 2024-12-07T18:21:25,099 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 1.5700 sec 2024-12-07T18:21:25,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:25,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:21:25,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:25,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:25,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:25,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:25,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:25,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:25,495 
DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/dc41d4c160d1436092a7ac4f5be3ef44 is 50, key is test_row_0/A:col10/1733595685489/Put/seqid=0 2024-12-07T18:21:25,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742359_1535 (size=17177) 2024-12-07T18:21:25,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/dc41d4c160d1436092a7ac4f5be3ef44 2024-12-07T18:21:25,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/ac71a322419a4ecca885c8a18d75499c is 50, key is test_row_0/B:col10/1733595685489/Put/seqid=0 2024-12-07T18:21:25,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595745529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595745529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595745530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595745536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742360_1536 (size=9857) 2024-12-07T18:21:25,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-07T18:21:25,633 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-12-07T18:21:25,634 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:25,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees 2024-12-07T18:21:25,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-07T18:21:25,636 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:25,636 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:25,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:25,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595745638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595745638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595745638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595745642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-07T18:21:25,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-07T18:21:25,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:25,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:25,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:25,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:25,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:25,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:25,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595745844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595745845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595745845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:25,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595745846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-07T18:21:25,941 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:25,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-07T18:21:25,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:25,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:25,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:25,941 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:25,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:25,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:25,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/ac71a322419a4ecca885c8a18d75499c 2024-12-07T18:21:25,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/836e988d01ec4701a3f13d9b75529963 is 50, key is test_row_0/C:col10/1733595685489/Put/seqid=0 2024-12-07T18:21:25,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742361_1537 (size=9857) 2024-12-07T18:21:26,093 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-07T18:21:26,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:26,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:26,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:26,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:26,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:26,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595746151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595746152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595746152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595746152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-07T18:21:26,246 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-07T18:21:26,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:26,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:26,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:26,246 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:26,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:26,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:26,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/836e988d01ec4701a3f13d9b75529963 2024-12-07T18:21:26,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/dc41d4c160d1436092a7ac4f5be3ef44 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/dc41d4c160d1436092a7ac4f5be3ef44 2024-12-07T18:21:26,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/dc41d4c160d1436092a7ac4f5be3ef44, entries=250, sequenceid=321, filesize=16.8 K 2024-12-07T18:21:26,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/ac71a322419a4ecca885c8a18d75499c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ac71a322419a4ecca885c8a18d75499c 2024-12-07T18:21:26,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ac71a322419a4ecca885c8a18d75499c, entries=100, sequenceid=321, filesize=9.6 K 2024-12-07T18:21:26,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/836e988d01ec4701a3f13d9b75529963 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/836e988d01ec4701a3f13d9b75529963 2024-12-07T18:21:26,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/836e988d01ec4701a3f13d9b75529963, entries=100, sequenceid=321, filesize=9.6 K 2024-12-07T18:21:26,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b2a15b0d424567342f9c04e70cd08f88 in 881ms, sequenceid=321, compaction requested=true 2024-12-07T18:21:26,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:26,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
b2a15b0d424567342f9c04e70cd08f88:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:26,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:26,372 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:26,372 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:26,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:26,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:26,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:26,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:26,372 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42427 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:26,373 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/A is initiating minor compaction (all files) 2024-12-07T18:21:26,373 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/A in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:26,373 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/19244919906e45978297b1c9f923086d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/75646edf3f3f43d4be9ce7d61f1eee30, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/dc41d4c160d1436092a7ac4f5be3ef44] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=41.4 K 2024-12-07T18:21:26,373 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19244919906e45978297b1c9f923086d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733595683000 2024-12-07T18:21:26,373 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:26,373 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/B is initiating minor compaction (all files) 2024-12-07T18:21:26,373 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/B in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:26,373 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/3753a76f94a4441784ce694790dbc661, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/d4a22ad2bda546d69142e4ef5791b971, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ac71a322419a4ecca885c8a18d75499c] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=34.3 K 2024-12-07T18:21:26,373 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75646edf3f3f43d4be9ce7d61f1eee30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733595683340 2024-12-07T18:21:26,374 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 3753a76f94a4441784ce694790dbc661, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733595683000 2024-12-07T18:21:26,374 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc41d4c160d1436092a7ac4f5be3ef44, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733595685482 2024-12-07T18:21:26,374 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d4a22ad2bda546d69142e4ef5791b971, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733595683340 2024-12-07T18:21:26,375 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ac71a322419a4ecca885c8a18d75499c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733595685488 2024-12-07T18:21:26,383 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#A#compaction#459 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:26,384 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#B#compaction#460 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:26,384 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/3c7ad2eab0ce498c9ac80c567ac654e4 is 50, key is test_row_0/A:col10/1733595685489/Put/seqid=0 2024-12-07T18:21:26,385 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/5f960df110944e0ca2aa87154a63b415 is 50, key is test_row_0/B:col10/1733595685489/Put/seqid=0 2024-12-07T18:21:26,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742362_1538 (size=13051) 2024-12-07T18:21:26,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742363_1539 (size=13051) 2024-12-07T18:21:26,398 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-07T18:21:26,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:26,399 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-07T18:21:26,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:26,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:26,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:26,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:26,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:26,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:26,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/255e0447b7d34d18b69e0240e031a023 is 50, key is test_row_0/A:col10/1733595685534/Put/seqid=0 2024-12-07T18:21:26,407 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/5f960df110944e0ca2aa87154a63b415 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/5f960df110944e0ca2aa87154a63b415 2024-12-07T18:21:26,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742364_1540 (size=12301) 2024-12-07T18:21:26,411 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/255e0447b7d34d18b69e0240e031a023 2024-12-07T18:21:26,414 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/B of b2a15b0d424567342f9c04e70cd08f88 into 5f960df110944e0ca2aa87154a63b415(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:26,414 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:26,414 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/B, priority=13, startTime=1733595686371; duration=0sec 2024-12-07T18:21:26,414 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:26,414 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:B 2024-12-07T18:21:26,414 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:26,417 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:26,417 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/C is initiating minor compaction (all files) 2024-12-07T18:21:26,417 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/C in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:26,417 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/6ef5b3a29cdf4c33a13ef464755b7adb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/1a156616305442a4ae6f831258ce3cde, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/836e988d01ec4701a3f13d9b75529963] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=34.3 K 2024-12-07T18:21:26,417 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ef5b3a29cdf4c33a13ef464755b7adb, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733595683000 2024-12-07T18:21:26,418 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a156616305442a4ae6f831258ce3cde, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733595683340 2024-12-07T18:21:26,418 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 836e988d01ec4701a3f13d9b75529963, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733595685488 2024-12-07T18:21:26,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/0a3cd07e2db74d6b929e350741766f68 is 50, key is test_row_0/B:col10/1733595685534/Put/seqid=0 2024-12-07T18:21:26,438 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#C#compaction#463 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:26,438 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/7aad990c81344612b068d05adb399bcf is 50, key is test_row_0/C:col10/1733595685489/Put/seqid=0 2024-12-07T18:21:26,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742365_1541 (size=12301) 2024-12-07T18:21:26,441 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/0a3cd07e2db74d6b929e350741766f68 2024-12-07T18:21:26,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/0af25b07134948b49e63b9db2f62e4a4 is 50, key is test_row_0/C:col10/1733595685534/Put/seqid=0 2024-12-07T18:21:26,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742366_1542 (size=13051) 2024-12-07T18:21:26,461 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/7aad990c81344612b068d05adb399bcf as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/7aad990c81344612b068d05adb399bcf 2024-12-07T18:21:26,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742367_1543 (size=12301) 2024-12-07T18:21:26,462 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/0af25b07134948b49e63b9db2f62e4a4 2024-12-07T18:21:26,467 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/C of b2a15b0d424567342f9c04e70cd08f88 into 
7aad990c81344612b068d05adb399bcf(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:26,467 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:26,467 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/C, priority=13, startTime=1733595686372; duration=0sec 2024-12-07T18:21:26,467 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:26,467 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:C 2024-12-07T18:21:26,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/255e0447b7d34d18b69e0240e031a023 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/255e0447b7d34d18b69e0240e031a023 2024-12-07T18:21:26,471 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/255e0447b7d34d18b69e0240e031a023, entries=150, sequenceid=345, filesize=12.0 K 2024-12-07T18:21:26,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/0a3cd07e2db74d6b929e350741766f68 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0a3cd07e2db74d6b929e350741766f68 2024-12-07T18:21:26,475 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0a3cd07e2db74d6b929e350741766f68, entries=150, sequenceid=345, filesize=12.0 K 2024-12-07T18:21:26,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/0af25b07134948b49e63b9db2f62e4a4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/0af25b07134948b49e63b9db2f62e4a4 2024-12-07T18:21:26,481 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 
{event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/0af25b07134948b49e63b9db2f62e4a4, entries=150, sequenceid=345, filesize=12.0 K 2024-12-07T18:21:26,482 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for b2a15b0d424567342f9c04e70cd08f88 in 83ms, sequenceid=345, compaction requested=false 2024-12-07T18:21:26,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:26,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:26,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-07T18:21:26,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-07T18:21:26,485 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-07T18:21:26,485 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 847 msec 2024-12-07T18:21:26,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees in 852 msec 2024-12-07T18:21:26,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:26,667 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:21:26,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:26,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:26,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:26,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:26,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:26,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:26,675 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/34f72303d48f42cfbe87d3e1de6f6d56 is 50, key is test_row_0/A:col10/1733595686666/Put/seqid=0 2024-12-07T18:21:26,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742368_1544 (size=14741) 2024-12-07T18:21:26,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595746704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595746705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595746706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595746709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-07T18:21:26,738 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-12-07T18:21:26,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:26,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees 2024-12-07T18:21:26,741 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:26,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-07T18:21:26,741 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:26,741 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:26,793 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/3c7ad2eab0ce498c9ac80c567ac654e4 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/3c7ad2eab0ce498c9ac80c567ac654e4 2024-12-07T18:21:26,798 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/A of b2a15b0d424567342f9c04e70cd08f88 into 3c7ad2eab0ce498c9ac80c567ac654e4(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:26,798 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:26,798 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/A, priority=13, startTime=1733595686371; duration=0sec 2024-12-07T18:21:26,798 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:26,798 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:A 2024-12-07T18:21:26,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595746810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595746810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595746812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:26,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595746814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-07T18:21:26,893 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:26,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-07T18:21:26,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:26,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:26,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:26,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:26,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:26,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:27,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595747015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595747016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595747016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595747023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-07T18:21:27,045 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-07T18:21:27,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:27,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:27,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:27,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:27,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:27,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:27,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/34f72303d48f42cfbe87d3e1de6f6d56 2024-12-07T18:21:27,087 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/194af89e39f94672b5e9b40c9d53982a is 50, key is test_row_0/B:col10/1733595686666/Put/seqid=0 2024-12-07T18:21:27,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742369_1545 (size=12301) 2024-12-07T18:21:27,198 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-07T18:21:27,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:27,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:27,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:27,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:27,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:27,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:27,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595747324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595747324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595747326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595747332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-07T18:21:27,351 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-07T18:21:27,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:27,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:27,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:27,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:27,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:27,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:27,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/194af89e39f94672b5e9b40c9d53982a 2024-12-07T18:21:27,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/4037bf2cc1f64f9cbb2f9b00c8e822cb is 50, key is test_row_0/C:col10/1733595686666/Put/seqid=0 2024-12-07T18:21:27,504 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-07T18:21:27,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:27,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:27,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:27,505 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:27,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:27,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:27,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742370_1546 (size=12301) 2024-12-07T18:21:27,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/4037bf2cc1f64f9cbb2f9b00c8e822cb 2024-12-07T18:21:27,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/34f72303d48f42cfbe87d3e1de6f6d56 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/34f72303d48f42cfbe87d3e1de6f6d56 2024-12-07T18:21:27,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/34f72303d48f42cfbe87d3e1de6f6d56, entries=200, sequenceid=359, filesize=14.4 K 2024-12-07T18:21:27,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/194af89e39f94672b5e9b40c9d53982a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/194af89e39f94672b5e9b40c9d53982a 2024-12-07T18:21:27,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/194af89e39f94672b5e9b40c9d53982a, entries=150, sequenceid=359, filesize=12.0 K 2024-12-07T18:21:27,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/4037bf2cc1f64f9cbb2f9b00c8e822cb as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4037bf2cc1f64f9cbb2f9b00c8e822cb 2024-12-07T18:21:27,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4037bf2cc1f64f9cbb2f9b00c8e822cb, entries=150, sequenceid=359, filesize=12.0 K 2024-12-07T18:21:27,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b2a15b0d424567342f9c04e70cd08f88 in 870ms, sequenceid=359, compaction requested=true 2024-12-07T18:21:27,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:27,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:27,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:27,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:27,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:27,538 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:27,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:27,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-07T18:21:27,539 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:27,540 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:27,540 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/A is initiating minor compaction (all files) 2024-12-07T18:21:27,540 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/A in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
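[Editor's note, not part of the captured log] The entries above show the memstore flush finishing with "compaction requested=true" and CompactSplit queuing minor compactions for stores A, B and C, which SortedCompactionPolicy/ExploringCompactionPolicy then select from the three eligible store files. The same operations can also be requested explicitly through the Admin API; the sketch below only illustrates those calls and is not something the log shows being invoked.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushCompactSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          // Counterpart of the FlushRegionProcedure the master dispatches above.
          admin.flush(tn);
          // Per-family minor compaction requests, as CompactSplit queues for A, B and C.
          admin.compact(tn, Bytes.toBytes("A"));
          admin.compact(tn, Bytes.toBytes("B"));
          admin.compact(tn, Bytes.toBytes("C"));
        }
      }
    }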
2024-12-07T18:21:27,540 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/3c7ad2eab0ce498c9ac80c567ac654e4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/255e0447b7d34d18b69e0240e031a023, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/34f72303d48f42cfbe87d3e1de6f6d56] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=39.2 K 2024-12-07T18:21:27,540 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:27,540 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/B is initiating minor compaction (all files) 2024-12-07T18:21:27,540 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/B in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:27,540 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/5f960df110944e0ca2aa87154a63b415, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0a3cd07e2db74d6b929e350741766f68, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/194af89e39f94672b5e9b40c9d53982a] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=36.8 K 2024-12-07T18:21:27,541 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f960df110944e0ca2aa87154a63b415, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733595683345 2024-12-07T18:21:27,541 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c7ad2eab0ce498c9ac80c567ac654e4, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733595683345 2024-12-07T18:21:27,541 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a3cd07e2db74d6b929e350741766f68, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1733595685526 2024-12-07T18:21:27,541 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 255e0447b7d34d18b69e0240e031a023, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1733595685526 2024-12-07T18:21:27,541 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 194af89e39f94672b5e9b40c9d53982a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733595686665 2024-12-07T18:21:27,541 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 34f72303d48f42cfbe87d3e1de6f6d56, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733595686664 2024-12-07T18:21:27,549 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#B#compaction#468 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:27,550 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/c76a20cbf6734d7eb79cd3fa67b8013c is 50, key is test_row_0/B:col10/1733595686666/Put/seqid=0 2024-12-07T18:21:27,556 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#A#compaction#469 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:27,556 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/635aa7c0e3f746e0b0ed0a35f272da9a is 50, key is test_row_0/A:col10/1733595686666/Put/seqid=0 2024-12-07T18:21:27,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742371_1547 (size=13153) 2024-12-07T18:21:27,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742372_1548 (size=13153) 2024-12-07T18:21:27,576 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/635aa7c0e3f746e0b0ed0a35f272da9a as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/635aa7c0e3f746e0b0ed0a35f272da9a 2024-12-07T18:21:27,581 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/A of b2a15b0d424567342f9c04e70cd08f88 into 635aa7c0e3f746e0b0ed0a35f272da9a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
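[Editor's note, not part of the captured log] With the A-store compaction above rewriting three small files into one ~12.8 K file, the workload's reads must still see consistent rows. As a hedged illustration of what an acid-guarantees style check looks like (the actual test code is not shown in this log), a single Get against the three families should return values from one atomic write regardless of the flushes and compactions recorded here.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowReadSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Get get = new Get(Bytes.toBytes("test_row_0"));
          get.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"));
          get.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"));
          get.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"));
          Result r = table.get(get);
          // A Get is served atomically from one row, so the three family values
          // should belong to the same write even across flush/compaction boundaries.
          System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"))));
          System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"))));
          System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"))));
        }
      }
    }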
2024-12-07T18:21:27,581 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:27,581 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/A, priority=13, startTime=1733595687538; duration=0sec 2024-12-07T18:21:27,581 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:27,581 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:A 2024-12-07T18:21:27,581 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:27,582 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:27,582 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/C is initiating minor compaction (all files) 2024-12-07T18:21:27,582 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/C in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:27,582 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/7aad990c81344612b068d05adb399bcf, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/0af25b07134948b49e63b9db2f62e4a4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4037bf2cc1f64f9cbb2f9b00c8e822cb] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=36.8 K 2024-12-07T18:21:27,582 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7aad990c81344612b068d05adb399bcf, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733595683345 2024-12-07T18:21:27,582 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 0af25b07134948b49e63b9db2f62e4a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1733595685526 2024-12-07T18:21:27,583 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 4037bf2cc1f64f9cbb2f9b00c8e822cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733595686665 2024-12-07T18:21:27,588 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b2a15b0d424567342f9c04e70cd08f88#C#compaction#470 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:27,589 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/13f670cc626d40969c443d5484e5201f is 50, key is test_row_0/C:col10/1733595686666/Put/seqid=0 2024-12-07T18:21:27,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742373_1549 (size=13153) 2024-12-07T18:21:27,657 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-07T18:21:27,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:27,657 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-07T18:21:27,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:27,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:27,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:27,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:27,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:27,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:27,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/05e2594fda3747e2af446f360f9fc17f is 50, key is test_row_0/A:col10/1733595686705/Put/seqid=0 2024-12-07T18:21:27,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742374_1550 (size=12301) 2024-12-07T18:21:27,834 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:27,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:27,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-07T18:21:27,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595747845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595747846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595747847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595747848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595747950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595747950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595747953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:27,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595747955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:27,975 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/c76a20cbf6734d7eb79cd3fa67b8013c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/c76a20cbf6734d7eb79cd3fa67b8013c 2024-12-07T18:21:27,982 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/B of b2a15b0d424567342f9c04e70cd08f88 into c76a20cbf6734d7eb79cd3fa67b8013c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:27,982 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:27,982 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/B, priority=13, startTime=1733595687538; duration=0sec 2024-12-07T18:21:27,982 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:27,982 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:B 2024-12-07T18:21:27,995 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/13f670cc626d40969c443d5484e5201f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/13f670cc626d40969c443d5484e5201f 2024-12-07T18:21:27,999 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/C of b2a15b0d424567342f9c04e70cd08f88 into 13f670cc626d40969c443d5484e5201f(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:27,999 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:27,999 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/C, priority=13, startTime=1733595687538; duration=0sec 2024-12-07T18:21:27,999 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:27,999 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:C 2024-12-07T18:21:28,075 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/05e2594fda3747e2af446f360f9fc17f 2024-12-07T18:21:28,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/871cc2b5bff04d228f33d439e59660b3 is 50, key is test_row_0/B:col10/1733595686705/Put/seqid=0 2024-12-07T18:21:28,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742375_1551 (size=12301) 2024-12-07T18:21:28,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:28,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595748154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:28,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:28,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595748155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:28,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:28,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595748157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:28,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:28,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595748159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:28,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:28,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595748461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:28,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:28,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595748461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:28,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:28,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595748462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:28,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:28,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595748464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:28,487 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/871cc2b5bff04d228f33d439e59660b3 2024-12-07T18:21:28,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/ddcee512dd9b40e6bc9caaffdba73b43 is 50, key is test_row_0/C:col10/1733595686705/Put/seqid=0 2024-12-07T18:21:28,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742376_1552 (size=12301) 2024-12-07T18:21:28,500 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/ddcee512dd9b40e6bc9caaffdba73b43 2024-12-07T18:21:28,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/05e2594fda3747e2af446f360f9fc17f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/05e2594fda3747e2af446f360f9fc17f 2024-12-07T18:21:28,507 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/05e2594fda3747e2af446f360f9fc17f, entries=150, sequenceid=385, filesize=12.0 K 2024-12-07T18:21:28,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/871cc2b5bff04d228f33d439e59660b3 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/871cc2b5bff04d228f33d439e59660b3 2024-12-07T18:21:28,511 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/871cc2b5bff04d228f33d439e59660b3, entries=150, sequenceid=385, filesize=12.0 K 2024-12-07T18:21:28,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/ddcee512dd9b40e6bc9caaffdba73b43 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ddcee512dd9b40e6bc9caaffdba73b43 2024-12-07T18:21:28,517 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ddcee512dd9b40e6bc9caaffdba73b43, entries=150, sequenceid=385, filesize=12.0 K 2024-12-07T18:21:28,517 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for b2a15b0d424567342f9c04e70cd08f88 in 860ms, sequenceid=385, compaction requested=false 2024-12-07T18:21:28,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:28,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:28,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=155 2024-12-07T18:21:28,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=155 2024-12-07T18:21:28,526 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-12-07T18:21:28,526 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7840 sec 2024-12-07T18:21:28,530 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees in 1.7870 sec 2024-12-07T18:21:28,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-07T18:21:28,845 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-12-07T18:21:28,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:28,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees 2024-12-07T18:21:28,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-07T18:21:28,847 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:28,848 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:28,848 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:28,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-07T18:21:28,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:28,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-07T18:21:28,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:28,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:28,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:28,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:28,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:28,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:28,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/1638537c61e7405187a24febd9f69b99 is 50, key is test_row_0/A:col10/1733595687846/Put/seqid=0 2024-12-07T18:21:28,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742377_1553 (size=14741) 2024-12-07T18:21:28,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-07T18:21:29,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:29,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,000 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:29,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595749003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595749003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595749004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595749008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595749113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595749113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595749113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595749114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-07T18:21:29,151 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-07T18:21:29,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:29,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:29,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,303 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-07T18:21:29,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:29,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595749317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595749317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595749318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595749318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,380 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/1638537c61e7405187a24febd9f69b99 2024-12-07T18:21:29,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/42f835f8c4764225867d5a4703ef82d5 is 50, key is test_row_0/B:col10/1733595687846/Put/seqid=0 2024-12-07T18:21:29,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742378_1554 (size=12301) 2024-12-07T18:21:29,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/42f835f8c4764225867d5a4703ef82d5 2024-12-07T18:21:29,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/ad30525da29848cf87ac96cc2eb7ee1d is 50, key is test_row_0/C:col10/1733595687846/Put/seqid=0 2024-12-07T18:21:29,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742379_1555 (size=12301) 2024-12-07T18:21:29,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-07T18:21:29,456 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-07T18:21:29,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:29,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:29,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:29,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,609 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-07T18:21:29,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
as already flushing 2024-12-07T18:21:29,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56878 deadline: 1733595749621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56880 deadline: 1733595749623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56956 deadline: 1733595749626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:29,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56892 deadline: 1733595749626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,676 DEBUG [Thread-2110 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x35ca71a1 to 127.0.0.1:56016 2024-12-07T18:21:29,676 DEBUG [Thread-2110 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:29,677 DEBUG [Thread-2104 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e690d6 to 127.0.0.1:56016 2024-12-07T18:21:29,678 DEBUG [Thread-2104 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:29,681 DEBUG [Thread-2108 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x00df2701 to 127.0.0.1:56016 2024-12-07T18:21:29,681 DEBUG [Thread-2106 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3abeec20 to 127.0.0.1:56016 2024-12-07T18:21:29,681 DEBUG [Thread-2108 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:29,681 DEBUG [Thread-2106 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:29,682 DEBUG [Thread-2112 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x56a4483a to 127.0.0.1:56016 2024-12-07T18:21:29,682 DEBUG [Thread-2112 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:29,762 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-07T18:21:29,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. as already flushing 2024-12-07T18:21:29,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:29,763 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:29,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/ad30525da29848cf87ac96cc2eb7ee1d 2024-12-07T18:21:29,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/1638537c61e7405187a24febd9f69b99 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1638537c61e7405187a24febd9f69b99 2024-12-07T18:21:29,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1638537c61e7405187a24febd9f69b99, entries=200, sequenceid=400, filesize=14.4 K 2024-12-07T18:21:29,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/42f835f8c4764225867d5a4703ef82d5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/42f835f8c4764225867d5a4703ef82d5 2024-12-07T18:21:29,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/42f835f8c4764225867d5a4703ef82d5, entries=150, 
sequenceid=400, filesize=12.0 K 2024-12-07T18:21:29,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/ad30525da29848cf87ac96cc2eb7ee1d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ad30525da29848cf87ac96cc2eb7ee1d 2024-12-07T18:21:29,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ad30525da29848cf87ac96cc2eb7ee1d, entries=150, sequenceid=400, filesize=12.0 K 2024-12-07T18:21:29,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for b2a15b0d424567342f9c04e70cd08f88 in 847ms, sequenceid=400, compaction requested=true 2024-12-07T18:21:29,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:29,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:29,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:29,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:29,817 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:29,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:29,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b2a15b0d424567342f9c04e70cd08f88:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:29,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:29,817 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:29,818 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:29,818 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:29,818 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] 
regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/A is initiating minor compaction (all files) 2024-12-07T18:21:29,818 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/B is initiating minor compaction (all files) 2024-12-07T18:21:29,818 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/A in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,818 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/B in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:29,818 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/635aa7c0e3f746e0b0ed0a35f272da9a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/05e2594fda3747e2af446f360f9fc17f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1638537c61e7405187a24febd9f69b99] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=39.3 K 2024-12-07T18:21:29,818 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/c76a20cbf6734d7eb79cd3fa67b8013c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/871cc2b5bff04d228f33d439e59660b3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/42f835f8c4764225867d5a4703ef82d5] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=36.9 K 2024-12-07T18:21:29,819 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting c76a20cbf6734d7eb79cd3fa67b8013c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733595686665 2024-12-07T18:21:29,819 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 635aa7c0e3f746e0b0ed0a35f272da9a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733595686665 2024-12-07T18:21:29,819 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05e2594fda3747e2af446f360f9fc17f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1733595686701 2024-12-07T18:21:29,819 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 871cc2b5bff04d228f33d439e59660b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1733595686701 
2024-12-07T18:21:29,819 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1638537c61e7405187a24febd9f69b99, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1733595687841 2024-12-07T18:21:29,819 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 42f835f8c4764225867d5a4703ef82d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1733595687841 2024-12-07T18:21:29,824 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#A#compaction#477 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:29,824 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#B#compaction#478 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:29,825 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/85b60c9c360546cb8a70a865059f6c09 is 50, key is test_row_0/A:col10/1733595687846/Put/seqid=0 2024-12-07T18:21:29,825 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/a0f0bb941d32449dba8cfb697603a238 is 50, key is test_row_0/B:col10/1733595687846/Put/seqid=0 2024-12-07T18:21:29,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742380_1556 (size=13255) 2024-12-07T18:21:29,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742381_1557 (size=13255) 2024-12-07T18:21:29,915 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:29,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-07T18:21:29,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:29,916 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-07T18:21:29,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:29,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:29,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:29,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:29,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:29,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:29,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/19720ccdf9734c7481bdecb8496f115f is 50, key is test_row_0/A:col10/1733595689003/Put/seqid=0 2024-12-07T18:21:29,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742382_1558 (size=12301) 2024-12-07T18:21:29,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-07T18:21:30,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:30,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
as already flushing 2024-12-07T18:21:30,131 DEBUG [Thread-2097 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51453050 to 127.0.0.1:56016 2024-12-07T18:21:30,131 DEBUG [Thread-2101 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f99adfe to 127.0.0.1:56016 2024-12-07T18:21:30,131 DEBUG [Thread-2101 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:30,131 DEBUG [Thread-2097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:30,133 DEBUG [Thread-2093 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46c37647 to 127.0.0.1:56016 2024-12-07T18:21:30,133 DEBUG [Thread-2099 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x632d1806 to 127.0.0.1:56016 2024-12-07T18:21:30,133 DEBUG [Thread-2093 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:30,133 DEBUG [Thread-2099 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:30,233 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/a0f0bb941d32449dba8cfb697603a238 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/a0f0bb941d32449dba8cfb697603a238 2024-12-07T18:21:30,236 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/85b60c9c360546cb8a70a865059f6c09 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/85b60c9c360546cb8a70a865059f6c09 2024-12-07T18:21:30,237 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/B of b2a15b0d424567342f9c04e70cd08f88 into a0f0bb941d32449dba8cfb697603a238(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:30,237 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:30,237 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/B, priority=13, startTime=1733595689817; duration=0sec 2024-12-07T18:21:30,237 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:30,237 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:B 2024-12-07T18:21:30,237 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:30,237 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:30,238 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): b2a15b0d424567342f9c04e70cd08f88/C is initiating minor compaction (all files) 2024-12-07T18:21:30,238 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b2a15b0d424567342f9c04e70cd08f88/C in TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:30,238 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/13f670cc626d40969c443d5484e5201f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ddcee512dd9b40e6bc9caaffdba73b43, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ad30525da29848cf87ac96cc2eb7ee1d] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp, totalSize=36.9 K 2024-12-07T18:21:30,238 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 13f670cc626d40969c443d5484e5201f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733595686665 2024-12-07T18:21:30,238 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ddcee512dd9b40e6bc9caaffdba73b43, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1733595686701 2024-12-07T18:21:30,238 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ad30525da29848cf87ac96cc2eb7ee1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1733595687841 2024-12-07T18:21:30,239 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in b2a15b0d424567342f9c04e70cd08f88/A of b2a15b0d424567342f9c04e70cd08f88 into 85b60c9c360546cb8a70a865059f6c09(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:30,239 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:30,239 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/A, priority=13, startTime=1733595689817; duration=0sec 2024-12-07T18:21:30,239 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:30,239 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:A 2024-12-07T18:21:30,243 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b2a15b0d424567342f9c04e70cd08f88#C#compaction#480 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:30,244 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/86574bdd77374e04ab68938fb8ec6e0d is 50, key is test_row_0/C:col10/1733595687846/Put/seqid=0 2024-12-07T18:21:30,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742383_1559 (size=13255) 2024-12-07T18:21:30,323 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/19720ccdf9734c7481bdecb8496f115f 2024-12-07T18:21:30,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/f1725566ac51474ebf35a3236f2a4d86 is 50, key is test_row_0/B:col10/1733595689003/Put/seqid=0 2024-12-07T18:21:30,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742384_1560 (size=12301) 2024-12-07T18:21:30,640 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-07T18:21:30,651 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/86574bdd77374e04ab68938fb8ec6e0d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/86574bdd77374e04ab68938fb8ec6e0d 2024-12-07T18:21:30,654 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b2a15b0d424567342f9c04e70cd08f88/C of b2a15b0d424567342f9c04e70cd08f88 into 86574bdd77374e04ab68938fb8ec6e0d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:30,654 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:30,654 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88., storeName=b2a15b0d424567342f9c04e70cd08f88/C, priority=13, startTime=1733595689817; duration=0sec 2024-12-07T18:21:30,654 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:30,654 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b2a15b0d424567342f9c04e70cd08f88:C 2024-12-07T18:21:30,732 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/f1725566ac51474ebf35a3236f2a4d86 2024-12-07T18:21:30,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/b2791dccb3924150978d037de99690a8 is 50, key is test_row_0/C:col10/1733595689003/Put/seqid=0 2024-12-07T18:21:30,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742385_1561 (size=12301) 2024-12-07T18:21:30,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-07T18:21:31,141 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/b2791dccb3924150978d037de99690a8 2024-12-07T18:21:31,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/19720ccdf9734c7481bdecb8496f115f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/19720ccdf9734c7481bdecb8496f115f 2024-12-07T18:21:31,147 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/19720ccdf9734c7481bdecb8496f115f, entries=150, sequenceid=423, filesize=12.0 K 2024-12-07T18:21:31,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/f1725566ac51474ebf35a3236f2a4d86 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/f1725566ac51474ebf35a3236f2a4d86 2024-12-07T18:21:31,150 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/f1725566ac51474ebf35a3236f2a4d86, entries=150, sequenceid=423, filesize=12.0 K 2024-12-07T18:21:31,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/b2791dccb3924150978d037de99690a8 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/b2791dccb3924150978d037de99690a8 2024-12-07T18:21:31,152 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/b2791dccb3924150978d037de99690a8, entries=150, sequenceid=423, filesize=12.0 K 2024-12-07T18:21:31,153 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=26.84 KB/27480 for b2a15b0d424567342f9c04e70cd08f88 in 1237ms, sequenceid=423, compaction requested=false 2024-12-07T18:21:31,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2538): Flush status journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:31,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:31,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=157 2024-12-07T18:21:31,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=157 2024-12-07T18:21:31,155 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-07T18:21:31,155 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3060 sec 2024-12-07T18:21:31,155 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees in 2.3090 sec 2024-12-07T18:21:32,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-07T18:21:32,951 INFO [Thread-2103 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 156 completed 2024-12-07T18:21:33,662 DEBUG [Thread-2095 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2fb24d40 to 127.0.0.1:56016 2024-12-07T18:21:33,662 DEBUG [Thread-2095 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 117 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 19 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2551 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7653 rows 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2526 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7578 rows 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2545 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7635 rows 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2549 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7647 rows 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2529 2024-12-07T18:21:33,662 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7587 rows 2024-12-07T18:21:33,662 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-07T18:21:33,662 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4fbee617 to 
127.0.0.1:56016 2024-12-07T18:21:33,662 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:33,665 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-07T18:21:33,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-07T18:21:33,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:33,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-07T18:21:33,669 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595693669"}]},"ts":"1733595693669"} 2024-12-07T18:21:33,670 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-07T18:21:33,673 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-07T18:21:33,673 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-07T18:21:33,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b2a15b0d424567342f9c04e70cd08f88, UNASSIGN}] 2024-12-07T18:21:33,675 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b2a15b0d424567342f9c04e70cd08f88, UNASSIGN 2024-12-07T18:21:33,675 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=b2a15b0d424567342f9c04e70cd08f88, regionState=CLOSING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:33,676 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T18:21:33,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; CloseRegionProcedure b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:21:33,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-07T18:21:33,827 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:33,828 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(124): Close b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:33,828 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T18:21:33,828 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] 
regionserver.HRegion(1681): Closing b2a15b0d424567342f9c04e70cd08f88, disabling compactions & flushes 2024-12-07T18:21:33,828 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:33,828 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:33,828 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. after waiting 0 ms 2024-12-07T18:21:33,828 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 2024-12-07T18:21:33,828 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(2837): Flushing b2a15b0d424567342f9c04e70cd08f88 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-07T18:21:33,828 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=A 2024-12-07T18:21:33,828 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:33,828 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=B 2024-12-07T18:21:33,828 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:33,828 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b2a15b0d424567342f9c04e70cd08f88, store=C 2024-12-07T18:21:33,828 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:33,832 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/53f8df4d61b946ef94f8fdd547bbf460 is 50, key is test_row_1/A:col10/1733595693661/Put/seqid=0 2024-12-07T18:21:33,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742386_1562 (size=9857) 2024-12-07T18:21:33,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-07T18:21:34,236 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=11.18 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/53f8df4d61b946ef94f8fdd547bbf460 2024-12-07T18:21:34,242 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/0aca143e590040eaa08f3ffd9566a39f is 50, key is test_row_1/B:col10/1733595693661/Put/seqid=0 2024-12-07T18:21:34,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742387_1563 (size=9857) 2024-12-07T18:21:34,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-07T18:21:34,646 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/0aca143e590040eaa08f3ffd9566a39f 2024-12-07T18:21:34,651 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/4a0be54cec0547319dff74a673115bd5 is 50, key is test_row_1/C:col10/1733595693661/Put/seqid=0 2024-12-07T18:21:34,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742388_1564 (size=9857) 2024-12-07T18:21:34,654 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/4a0be54cec0547319dff74a673115bd5 2024-12-07T18:21:34,657 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/A/53f8df4d61b946ef94f8fdd547bbf460 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/53f8df4d61b946ef94f8fdd547bbf460 2024-12-07T18:21:34,660 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/53f8df4d61b946ef94f8fdd547bbf460, entries=100, sequenceid=434, filesize=9.6 K 2024-12-07T18:21:34,660 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/B/0aca143e590040eaa08f3ffd9566a39f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0aca143e590040eaa08f3ffd9566a39f 2024-12-07T18:21:34,662 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0aca143e590040eaa08f3ffd9566a39f, entries=100, sequenceid=434, filesize=9.6 K 2024-12-07T18:21:34,663 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/.tmp/C/4a0be54cec0547319dff74a673115bd5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4a0be54cec0547319dff74a673115bd5 2024-12-07T18:21:34,665 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4a0be54cec0547319dff74a673115bd5, entries=100, sequenceid=434, filesize=9.6 K 2024-12-07T18:21:34,666 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for b2a15b0d424567342f9c04e70cd08f88 in 838ms, sequenceid=434, compaction requested=true 2024-12-07T18:21:34,666 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e798c5eb2c8747dd9796092a068d7c9f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/644a3d7a808341d6823130a5d6f3b7f8, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/0909dac0ebbe4d2cb708808006706e87, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/6da7c82f5ced464085972eb5451ad99b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/21322693ec5b48958007cf74f2967b55, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/2089d527ea6248b594e94bd51c9cfb77, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/fc6cc90ce30145448cd919988dda5d00, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4e2cc6b221da41778ee121e46bfb2780, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e9f5cd96c7db48188aeba3df25d6205a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/5396969203f541a8aecfc9ae3f12c9a5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/d74ae3d1e7a0407b90948424901de5f3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/a8d05c7b5e244696b45eb85d254efdc9, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4826ae27aa03474985dad7b2396fcf19, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/8c4b246da80549dd8a3f350541e44ad0, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1227a80d00d34e09b5efa9d2c0972888, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4fb3588c68694a549b238c6470357f16, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/40b12b12dc2e4c778f8570b83c54a3fb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/3eaaaacfb8914b308e764d53e372332a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/c851f670429949cc8cfc2ce623a395cc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/19244919906e45978297b1c9f923086d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/75646edf3f3f43d4be9ce7d61f1eee30, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/dc41d4c160d1436092a7ac4f5be3ef44, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/3c7ad2eab0ce498c9ac80c567ac654e4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/255e0447b7d34d18b69e0240e031a023, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/34f72303d48f42cfbe87d3e1de6f6d56, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/635aa7c0e3f746e0b0ed0a35f272da9a, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/05e2594fda3747e2af446f360f9fc17f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1638537c61e7405187a24febd9f69b99] to archive 2024-12-07T18:21:34,668 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:21:34,669 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e798c5eb2c8747dd9796092a068d7c9f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e798c5eb2c8747dd9796092a068d7c9f 2024-12-07T18:21:34,670 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/644a3d7a808341d6823130a5d6f3b7f8 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/644a3d7a808341d6823130a5d6f3b7f8 2024-12-07T18:21:34,671 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/0909dac0ebbe4d2cb708808006706e87 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/0909dac0ebbe4d2cb708808006706e87 2024-12-07T18:21:34,671 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/6da7c82f5ced464085972eb5451ad99b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/6da7c82f5ced464085972eb5451ad99b 2024-12-07T18:21:34,672 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/21322693ec5b48958007cf74f2967b55 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/21322693ec5b48958007cf74f2967b55 2024-12-07T18:21:34,673 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/2089d527ea6248b594e94bd51c9cfb77 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/2089d527ea6248b594e94bd51c9cfb77 2024-12-07T18:21:34,674 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/fc6cc90ce30145448cd919988dda5d00 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/fc6cc90ce30145448cd919988dda5d00 2024-12-07T18:21:34,674 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4e2cc6b221da41778ee121e46bfb2780 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4e2cc6b221da41778ee121e46bfb2780 2024-12-07T18:21:34,675 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e9f5cd96c7db48188aeba3df25d6205a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/e9f5cd96c7db48188aeba3df25d6205a 2024-12-07T18:21:34,676 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/5396969203f541a8aecfc9ae3f12c9a5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/5396969203f541a8aecfc9ae3f12c9a5 2024-12-07T18:21:34,677 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/d74ae3d1e7a0407b90948424901de5f3 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/d74ae3d1e7a0407b90948424901de5f3 2024-12-07T18:21:34,678 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/a8d05c7b5e244696b45eb85d254efdc9 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/a8d05c7b5e244696b45eb85d254efdc9 2024-12-07T18:21:34,678 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4826ae27aa03474985dad7b2396fcf19 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4826ae27aa03474985dad7b2396fcf19 2024-12-07T18:21:34,679 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/8c4b246da80549dd8a3f350541e44ad0 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/8c4b246da80549dd8a3f350541e44ad0 2024-12-07T18:21:34,680 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1227a80d00d34e09b5efa9d2c0972888 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1227a80d00d34e09b5efa9d2c0972888 2024-12-07T18:21:34,681 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4fb3588c68694a549b238c6470357f16 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/4fb3588c68694a549b238c6470357f16 2024-12-07T18:21:34,682 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/40b12b12dc2e4c778f8570b83c54a3fb to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/40b12b12dc2e4c778f8570b83c54a3fb 2024-12-07T18:21:34,683 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/3eaaaacfb8914b308e764d53e372332a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/3eaaaacfb8914b308e764d53e372332a 2024-12-07T18:21:34,683 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/c851f670429949cc8cfc2ce623a395cc to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/c851f670429949cc8cfc2ce623a395cc 2024-12-07T18:21:34,684 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/19244919906e45978297b1c9f923086d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/19244919906e45978297b1c9f923086d 2024-12-07T18:21:34,685 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/75646edf3f3f43d4be9ce7d61f1eee30 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/75646edf3f3f43d4be9ce7d61f1eee30 2024-12-07T18:21:34,686 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/dc41d4c160d1436092a7ac4f5be3ef44 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/dc41d4c160d1436092a7ac4f5be3ef44 2024-12-07T18:21:34,686 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/3c7ad2eab0ce498c9ac80c567ac654e4 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/3c7ad2eab0ce498c9ac80c567ac654e4 2024-12-07T18:21:34,687 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/255e0447b7d34d18b69e0240e031a023 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/255e0447b7d34d18b69e0240e031a023 2024-12-07T18:21:34,688 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/34f72303d48f42cfbe87d3e1de6f6d56 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/34f72303d48f42cfbe87d3e1de6f6d56 2024-12-07T18:21:34,689 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/635aa7c0e3f746e0b0ed0a35f272da9a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/635aa7c0e3f746e0b0ed0a35f272da9a 2024-12-07T18:21:34,690 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/05e2594fda3747e2af446f360f9fc17f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/05e2594fda3747e2af446f360f9fc17f 2024-12-07T18:21:34,690 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1638537c61e7405187a24febd9f69b99 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/1638537c61e7405187a24febd9f69b99 2024-12-07T18:21:34,691 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/30af0008e9ad4955a3eaac480c51265a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0d22cf9b23854c508b2624e63730c6a6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2b0925e4d5234c3ea0b50310d1f03e9d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/816bd78ced3e4fcfbf18739030f42e69, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/8b39fd1c343444c0a69f28d15f864d03, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/6202f403c8d640e6a4ba445f8104c2ae, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ec515b7d0ea3476bab771c96ece70183, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bb17ac27f9f94bfc9fb59f52b5c921e7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/48ab0739cef743dbbd741e528c1d82dd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/abb6d2a6c43c44318633cddda2e41642, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bcd181b9204f46a7bc1bf759e5531756, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/41856ea46a754b938600b972fca6d71e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/659642dc2b3a4da086c8b0a9ecf7212b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/c1bb5fea423346fd9013473be3120f7e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/4ea8e405b15a4bfab29b3f16f3a25d9b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2b712731248d445297cbf739a542cacb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/682bf4b6fafd43ebb7ce8b78419bfea2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2d857f6225144f31a8919ec25a9d636a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/3753a76f94a4441784ce694790dbc661, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/993bed6fe6a644ceac1d5454d09d0ffc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/d4a22ad2bda546d69142e4ef5791b971, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/5f960df110944e0ca2aa87154a63b415, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ac71a322419a4ecca885c8a18d75499c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0a3cd07e2db74d6b929e350741766f68, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/c76a20cbf6734d7eb79cd3fa67b8013c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/194af89e39f94672b5e9b40c9d53982a, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/871cc2b5bff04d228f33d439e59660b3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/42f835f8c4764225867d5a4703ef82d5] to archive 2024-12-07T18:21:34,692 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:21:34,693 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/30af0008e9ad4955a3eaac480c51265a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/30af0008e9ad4955a3eaac480c51265a 2024-12-07T18:21:34,694 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0d22cf9b23854c508b2624e63730c6a6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0d22cf9b23854c508b2624e63730c6a6 2024-12-07T18:21:34,695 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2b0925e4d5234c3ea0b50310d1f03e9d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2b0925e4d5234c3ea0b50310d1f03e9d 2024-12-07T18:21:34,695 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/816bd78ced3e4fcfbf18739030f42e69 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/816bd78ced3e4fcfbf18739030f42e69 2024-12-07T18:21:34,696 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/8b39fd1c343444c0a69f28d15f864d03 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/8b39fd1c343444c0a69f28d15f864d03 2024-12-07T18:21:34,697 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/6202f403c8d640e6a4ba445f8104c2ae to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/6202f403c8d640e6a4ba445f8104c2ae 2024-12-07T18:21:34,698 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ec515b7d0ea3476bab771c96ece70183 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ec515b7d0ea3476bab771c96ece70183 2024-12-07T18:21:34,698 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bb17ac27f9f94bfc9fb59f52b5c921e7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bb17ac27f9f94bfc9fb59f52b5c921e7 2024-12-07T18:21:34,699 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/48ab0739cef743dbbd741e528c1d82dd to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/48ab0739cef743dbbd741e528c1d82dd 2024-12-07T18:21:34,700 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/abb6d2a6c43c44318633cddda2e41642 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/abb6d2a6c43c44318633cddda2e41642 2024-12-07T18:21:34,700 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bcd181b9204f46a7bc1bf759e5531756 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/bcd181b9204f46a7bc1bf759e5531756 2024-12-07T18:21:34,701 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/41856ea46a754b938600b972fca6d71e to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/41856ea46a754b938600b972fca6d71e 2024-12-07T18:21:34,702 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/659642dc2b3a4da086c8b0a9ecf7212b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/659642dc2b3a4da086c8b0a9ecf7212b 2024-12-07T18:21:34,703 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/c1bb5fea423346fd9013473be3120f7e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/c1bb5fea423346fd9013473be3120f7e 2024-12-07T18:21:34,703 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/4ea8e405b15a4bfab29b3f16f3a25d9b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/4ea8e405b15a4bfab29b3f16f3a25d9b 2024-12-07T18:21:34,704 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2b712731248d445297cbf739a542cacb to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2b712731248d445297cbf739a542cacb 2024-12-07T18:21:34,705 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/682bf4b6fafd43ebb7ce8b78419bfea2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/682bf4b6fafd43ebb7ce8b78419bfea2 2024-12-07T18:21:34,706 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2d857f6225144f31a8919ec25a9d636a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/2d857f6225144f31a8919ec25a9d636a 2024-12-07T18:21:34,706 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/3753a76f94a4441784ce694790dbc661 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/3753a76f94a4441784ce694790dbc661 2024-12-07T18:21:34,707 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/993bed6fe6a644ceac1d5454d09d0ffc to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/993bed6fe6a644ceac1d5454d09d0ffc 2024-12-07T18:21:34,708 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/d4a22ad2bda546d69142e4ef5791b971 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/d4a22ad2bda546d69142e4ef5791b971 2024-12-07T18:21:34,709 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/5f960df110944e0ca2aa87154a63b415 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/5f960df110944e0ca2aa87154a63b415 2024-12-07T18:21:34,709 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ac71a322419a4ecca885c8a18d75499c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/ac71a322419a4ecca885c8a18d75499c 2024-12-07T18:21:34,710 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0a3cd07e2db74d6b929e350741766f68 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0a3cd07e2db74d6b929e350741766f68 2024-12-07T18:21:34,711 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/c76a20cbf6734d7eb79cd3fa67b8013c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/c76a20cbf6734d7eb79cd3fa67b8013c 2024-12-07T18:21:34,712 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/194af89e39f94672b5e9b40c9d53982a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/194af89e39f94672b5e9b40c9d53982a 2024-12-07T18:21:34,713 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/871cc2b5bff04d228f33d439e59660b3 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/871cc2b5bff04d228f33d439e59660b3 2024-12-07T18:21:34,713 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/42f835f8c4764225867d5a4703ef82d5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/42f835f8c4764225867d5a4703ef82d5 2024-12-07T18:21:34,714 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8c2b029dc9a842d7b05901b86b3cefc6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/cd73b93728e74e0087f63e7e7f979c56, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/95fdfd801cd64688abb88d6e701c5938, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/a0796de97ab94bc2b9ee24ae0749ee7a, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c1923b3719a149d598e5308e9318a76b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/79590bf70020415bbf3cae5302b10360, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/f40fb889c61d455688fc106a592baaf8, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ece21e4f170646d5ad0c9abe56e69b90, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4a345eec47cd4a8fb627418bfe464041, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/55281eb4b7ba4ea1b437b0f81ea1a19e, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8cdc1ff0e9e14099bdae517fb7e5114b, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/759ab423d84445ed83f695b72f4706d3, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/e738b5dd0926494aa3f26c73582f13c2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/3c4310754680496e9bf1ceb8e4798a13, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c3c5d2da4bfa4cc7ab7acf4165dda763, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/1144c91ca3c64e258d76b415b0170f97, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c240dcdc900d4e61bff5cb77bb989905, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4ade2ca5230d4de2ac7da424eac20d04, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/6ef5b3a29cdf4c33a13ef464755b7adb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/b48c915b9c0d48ed8845895ca975869d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/1a156616305442a4ae6f831258ce3cde, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/7aad990c81344612b068d05adb399bcf, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/836e988d01ec4701a3f13d9b75529963, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/0af25b07134948b49e63b9db2f62e4a4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/13f670cc626d40969c443d5484e5201f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4037bf2cc1f64f9cbb2f9b00c8e822cb, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ddcee512dd9b40e6bc9caaffdba73b43, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ad30525da29848cf87ac96cc2eb7ee1d] to archive 2024-12-07T18:21:34,715 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:21:34,716 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8c2b029dc9a842d7b05901b86b3cefc6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8c2b029dc9a842d7b05901b86b3cefc6 2024-12-07T18:21:34,717 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/cd73b93728e74e0087f63e7e7f979c56 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/cd73b93728e74e0087f63e7e7f979c56 2024-12-07T18:21:34,718 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/95fdfd801cd64688abb88d6e701c5938 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/95fdfd801cd64688abb88d6e701c5938 2024-12-07T18:21:34,719 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/a0796de97ab94bc2b9ee24ae0749ee7a to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/a0796de97ab94bc2b9ee24ae0749ee7a 2024-12-07T18:21:34,720 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c1923b3719a149d598e5308e9318a76b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c1923b3719a149d598e5308e9318a76b 2024-12-07T18:21:34,721 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/79590bf70020415bbf3cae5302b10360 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/79590bf70020415bbf3cae5302b10360 2024-12-07T18:21:34,722 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/f40fb889c61d455688fc106a592baaf8 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/f40fb889c61d455688fc106a592baaf8 2024-12-07T18:21:34,722 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ece21e4f170646d5ad0c9abe56e69b90 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ece21e4f170646d5ad0c9abe56e69b90 2024-12-07T18:21:34,723 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4a345eec47cd4a8fb627418bfe464041 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4a345eec47cd4a8fb627418bfe464041 2024-12-07T18:21:34,724 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/55281eb4b7ba4ea1b437b0f81ea1a19e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/55281eb4b7ba4ea1b437b0f81ea1a19e 2024-12-07T18:21:34,725 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8cdc1ff0e9e14099bdae517fb7e5114b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/8cdc1ff0e9e14099bdae517fb7e5114b 2024-12-07T18:21:34,726 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/759ab423d84445ed83f695b72f4706d3 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/759ab423d84445ed83f695b72f4706d3 2024-12-07T18:21:34,727 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/e738b5dd0926494aa3f26c73582f13c2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/e738b5dd0926494aa3f26c73582f13c2 2024-12-07T18:21:34,727 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/3c4310754680496e9bf1ceb8e4798a13 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/3c4310754680496e9bf1ceb8e4798a13 2024-12-07T18:21:34,729 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c3c5d2da4bfa4cc7ab7acf4165dda763 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c3c5d2da4bfa4cc7ab7acf4165dda763 2024-12-07T18:21:34,730 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/1144c91ca3c64e258d76b415b0170f97 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/1144c91ca3c64e258d76b415b0170f97 2024-12-07T18:21:34,730 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c240dcdc900d4e61bff5cb77bb989905 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/c240dcdc900d4e61bff5cb77bb989905 2024-12-07T18:21:34,732 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4ade2ca5230d4de2ac7da424eac20d04 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4ade2ca5230d4de2ac7da424eac20d04 2024-12-07T18:21:34,733 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/6ef5b3a29cdf4c33a13ef464755b7adb to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/6ef5b3a29cdf4c33a13ef464755b7adb 2024-12-07T18:21:34,734 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/b48c915b9c0d48ed8845895ca975869d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/b48c915b9c0d48ed8845895ca975869d 2024-12-07T18:21:34,735 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/1a156616305442a4ae6f831258ce3cde to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/1a156616305442a4ae6f831258ce3cde 2024-12-07T18:21:34,736 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/7aad990c81344612b068d05adb399bcf to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/7aad990c81344612b068d05adb399bcf 2024-12-07T18:21:34,736 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/836e988d01ec4701a3f13d9b75529963 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/836e988d01ec4701a3f13d9b75529963 2024-12-07T18:21:34,737 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/0af25b07134948b49e63b9db2f62e4a4 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/0af25b07134948b49e63b9db2f62e4a4 2024-12-07T18:21:34,738 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/13f670cc626d40969c443d5484e5201f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/13f670cc626d40969c443d5484e5201f 2024-12-07T18:21:34,739 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4037bf2cc1f64f9cbb2f9b00c8e822cb to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4037bf2cc1f64f9cbb2f9b00c8e822cb 2024-12-07T18:21:34,739 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ddcee512dd9b40e6bc9caaffdba73b43 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ddcee512dd9b40e6bc9caaffdba73b43 2024-12-07T18:21:34,740 DEBUG [StoreCloser-TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ad30525da29848cf87ac96cc2eb7ee1d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/ad30525da29848cf87ac96cc2eb7ee1d 2024-12-07T18:21:34,744 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/recovered.edits/437.seqid, newMaxSeqId=437, maxSeqId=1 2024-12-07T18:21:34,744 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88. 
2024-12-07T18:21:34,744 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1635): Region close journal for b2a15b0d424567342f9c04e70cd08f88: 2024-12-07T18:21:34,745 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(170): Closed b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:34,746 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=b2a15b0d424567342f9c04e70cd08f88, regionState=CLOSED 2024-12-07T18:21:34,748 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-12-07T18:21:34,748 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseRegionProcedure b2a15b0d424567342f9c04e70cd08f88, server=8a7a030b35db,45237,1733595542335 in 1.0710 sec 2024-12-07T18:21:34,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-07T18:21:34,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b2a15b0d424567342f9c04e70cd08f88, UNASSIGN in 1.0740 sec 2024-12-07T18:21:34,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-07T18:21:34,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.0760 sec 2024-12-07T18:21:34,751 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595694751"}]},"ts":"1733595694751"} 2024-12-07T18:21:34,751 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-07T18:21:34,754 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-07T18:21:34,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.0880 sec 2024-12-07T18:21:34,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-07T18:21:34,771 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 158 completed 2024-12-07T18:21:34,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-07T18:21:34,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:34,772 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=162, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:34,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-07T18:21:34,773 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=162, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:34,774 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:34,776 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/recovered.edits] 2024-12-07T18:21:34,778 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/19720ccdf9734c7481bdecb8496f115f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/19720ccdf9734c7481bdecb8496f115f 2024-12-07T18:21:34,778 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/53f8df4d61b946ef94f8fdd547bbf460 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/53f8df4d61b946ef94f8fdd547bbf460 2024-12-07T18:21:34,780 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/85b60c9c360546cb8a70a865059f6c09 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/A/85b60c9c360546cb8a70a865059f6c09 2024-12-07T18:21:34,781 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0aca143e590040eaa08f3ffd9566a39f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/0aca143e590040eaa08f3ffd9566a39f 2024-12-07T18:21:34,782 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/a0f0bb941d32449dba8cfb697603a238 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/a0f0bb941d32449dba8cfb697603a238 
2024-12-07T18:21:34,783 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/f1725566ac51474ebf35a3236f2a4d86 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/B/f1725566ac51474ebf35a3236f2a4d86 2024-12-07T18:21:34,785 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4a0be54cec0547319dff74a673115bd5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/4a0be54cec0547319dff74a673115bd5 2024-12-07T18:21:34,785 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/86574bdd77374e04ab68938fb8ec6e0d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/86574bdd77374e04ab68938fb8ec6e0d 2024-12-07T18:21:34,786 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/b2791dccb3924150978d037de99690a8 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/C/b2791dccb3924150978d037de99690a8 2024-12-07T18:21:34,788 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/recovered.edits/437.seqid to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88/recovered.edits/437.seqid 2024-12-07T18:21:34,789 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/b2a15b0d424567342f9c04e70cd08f88 2024-12-07T18:21:34,789 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-07T18:21:34,790 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=162, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:34,792 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-07T18:21:34,793 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-12-07T18:21:34,794 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=162, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:34,794 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-07T18:21:34,794 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733595694794"}]},"ts":"9223372036854775807"} 2024-12-07T18:21:34,795 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-07T18:21:34,795 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b2a15b0d424567342f9c04e70cd08f88, NAME => 'TestAcidGuarantees,,1733595667523.b2a15b0d424567342f9c04e70cd08f88.', STARTKEY => '', ENDKEY => ''}] 2024-12-07T18:21:34,795 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-07T18:21:34,795 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733595694795"}]},"ts":"9223372036854775807"} 2024-12-07T18:21:34,796 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-07T18:21:34,799 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=162, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:34,799 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 27 msec 2024-12-07T18:21:34,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-07T18:21:34,874 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 162 completed 2024-12-07T18:21:34,884 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=238 (was 237) - Thread LEAK? -, OpenFileDescriptor=446 (was 448), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=437 (was 442), ProcessCount=11 (was 11), AvailableMemoryMB=6898 (was 6931) 2024-12-07T18:21:34,894 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=238, OpenFileDescriptor=446, MaxFileDescriptor=1048576, SystemLoadAverage=437, ProcessCount=11, AvailableMemoryMB=6897 2024-12-07T18:21:34,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
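For context, the DISABLE and DELETE operations that just completed above (procIds 158 and 162) are what a client gets by calling the standard HBase 2.x Admin API; the log records the master-side procedures those calls drive. A minimal sketch of the equivalent client code (not part of the test log; assumes a reachable cluster configuration on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(tn)) {
        // disableTable/deleteTable block until the master-side
        // DisableTableProcedure / DeleteTableProcedure finish,
        // mirroring the "procId ... completed" entries in the log.
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);
        }
        admin.deleteTable(tn);
      }
    }
  }
}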
2024-12-07T18:21:34,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T18:21:34,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:34,897 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T18:21:34,897 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:34,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 163 2024-12-07T18:21:34,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-07T18:21:34,897 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T18:21:34,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742389_1565 (size=963) 2024-12-07T18:21:34,903 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7 2024-12-07T18:21:34,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742390_1566 (size=53) 2024-12-07T18:21:34,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-07T18:21:35,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-07T18:21:35,308 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:21:35,308 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 5f25402e6c4eaa56d1d09719bc4c6a4c, disabling compactions & flushes 2024-12-07T18:21:35,308 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:35,308 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:35,308 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. after waiting 0 ms 2024-12-07T18:21:35,308 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:35,308 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:35,308 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:35,310 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T18:21:35,311 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733595695311"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733595695311"}]},"ts":"1733595695311"} 2024-12-07T18:21:35,312 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
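The table descriptor logged above (table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three families A/B/C with VERSIONS=1, BLOOMFILTER=ROW, 64KB blocks) can be reproduced with the HBase 2.x descriptor builders. A rough sketch, assuming an Admin handle is already open (the helper name is illustrative, not from the test):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableExample {
  // Builds and creates a table equivalent to the descriptor logged above.
  static void createTable(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table-level metadata from the log: adaptive in-memory compaction
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      table.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
              .build());
    }
    // Blocks until the master's CreateTableProcedure (pid=163 above) finishes.
    admin.createTable(table.build());
  }
}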
2024-12-07T18:21:35,312 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T18:21:35,312 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595695312"}]},"ts":"1733595695312"} 2024-12-07T18:21:35,313 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-07T18:21:35,316 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f25402e6c4eaa56d1d09719bc4c6a4c, ASSIGN}] 2024-12-07T18:21:35,317 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f25402e6c4eaa56d1d09719bc4c6a4c, ASSIGN 2024-12-07T18:21:35,318 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f25402e6c4eaa56d1d09719bc4c6a4c, ASSIGN; state=OFFLINE, location=8a7a030b35db,45237,1733595542335; forceNewPlan=false, retain=false 2024-12-07T18:21:35,468 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=5f25402e6c4eaa56d1d09719bc4c6a4c, regionState=OPENING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:35,469 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; OpenRegionProcedure 5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:21:35,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-07T18:21:35,621 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:35,623 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:35,623 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:21:35,623 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:35,624 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:21:35,624 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:35,624 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:35,625 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:35,626 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:21:35,626 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5f25402e6c4eaa56d1d09719bc4c6a4c columnFamilyName A 2024-12-07T18:21:35,626 DEBUG [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:35,626 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(327): Store=5f25402e6c4eaa56d1d09719bc4c6a4c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:21:35,626 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:35,627 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:21:35,627 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5f25402e6c4eaa56d1d09719bc4c6a4c columnFamilyName B 2024-12-07T18:21:35,627 DEBUG [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:35,628 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(327): Store=5f25402e6c4eaa56d1d09719bc4c6a4c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:21:35,628 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:35,629 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:21:35,629 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5f25402e6c4eaa56d1d09719bc4c6a4c columnFamilyName C 2024-12-07T18:21:35,629 DEBUG [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:35,629 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(327): Store=5f25402e6c4eaa56d1d09719bc4c6a4c/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:21:35,629 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:35,630 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:35,630 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:35,631 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T18:21:35,632 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:35,633 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T18:21:35,633 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened 5f25402e6c4eaa56d1d09719bc4c6a4c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60813725, jitterRate=-0.09380488097667694}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T18:21:35,634 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:35,635 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., pid=165, masterSystemTime=1733595695621 2024-12-07T18:21:35,636 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:35,636 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:35,636 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=5f25402e6c4eaa56d1d09719bc4c6a4c, regionState=OPEN, openSeqNum=2, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:35,638 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-12-07T18:21:35,638 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; OpenRegionProcedure 5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 in 168 msec 2024-12-07T18:21:35,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-07T18:21:35,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f25402e6c4eaa56d1d09719bc4c6a4c, ASSIGN in 322 msec 2024-12-07T18:21:35,639 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T18:21:35,640 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595695639"}]},"ts":"1733595695639"} 2024-12-07T18:21:35,640 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-07T18:21:35,643 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T18:21:35,643 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 748 msec 2024-12-07T18:21:36,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-07T18:21:36,000 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-07T18:21:36,002 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6364386e to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@582ec26d 2024-12-07T18:21:36,007 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@507d2cf9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:36,008 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:36,009 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56830, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:36,010 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T18:21:36,010 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60964, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T18:21:36,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-07T18:21:36,012 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T18:21:36,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=166, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-07T18:21:36,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742391_1567 (size=999) 2024-12-07T18:21:36,422 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-07T18:21:36,422 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-07T18:21:36,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=167, ppid=166, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-07T18:21:36,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f25402e6c4eaa56d1d09719bc4c6a4c, REOPEN/MOVE}] 2024-12-07T18:21:36,426 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f25402e6c4eaa56d1d09719bc4c6a4c, REOPEN/MOVE 2024-12-07T18:21:36,426 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=5f25402e6c4eaa56d1d09719bc4c6a4c, regionState=CLOSING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:36,427 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T18:21:36,427 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE; CloseRegionProcedure 5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:21:36,578 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:36,579 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(124): Close 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,579 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T18:21:36,579 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1681): Closing 5f25402e6c4eaa56d1d09719bc4c6a4c, disabling compactions & flushes 2024-12-07T18:21:36,579 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:36,579 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:36,579 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. after waiting 0 ms 2024-12-07T18:21:36,579 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:36,582 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-07T18:21:36,583 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:36,583 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1635): Region close journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:36,583 WARN [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegionServer(3786): Not adding moved region record: 5f25402e6c4eaa56d1d09719bc4c6a4c to self. 2024-12-07T18:21:36,584 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(170): Closed 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,584 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=5f25402e6c4eaa56d1d09719bc4c6a4c, regionState=CLOSED 2024-12-07T18:21:36,586 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-12-07T18:21:36,586 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; CloseRegionProcedure 5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 in 158 msec 2024-12-07T18:21:36,587 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f25402e6c4eaa56d1d09719bc4c6a4c, REOPEN/MOVE; state=CLOSED, location=8a7a030b35db,45237,1733595542335; forceNewPlan=false, retain=true 2024-12-07T18:21:36,737 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=5f25402e6c4eaa56d1d09719bc4c6a4c, regionState=OPENING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:36,738 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=168, state=RUNNABLE; OpenRegionProcedure 5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:21:36,890 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:36,892 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:36,892 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7285): Opening region: {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} 2024-12-07T18:21:36,892 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,892 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T18:21:36,893 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7327): checking encryption for 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,893 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7330): checking classloading for 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,894 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,894 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:21:36,894 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5f25402e6c4eaa56d1d09719bc4c6a4c columnFamilyName A 2024-12-07T18:21:36,895 DEBUG [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:36,896 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(327): Store=5f25402e6c4eaa56d1d09719bc4c6a4c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:21:36,896 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,896 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:21:36,897 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5f25402e6c4eaa56d1d09719bc4c6a4c columnFamilyName B 2024-12-07T18:21:36,897 DEBUG [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:36,897 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(327): Store=5f25402e6c4eaa56d1d09719bc4c6a4c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:21:36,897 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,897 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-07T18:21:36,897 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5f25402e6c4eaa56d1d09719bc4c6a4c columnFamilyName C 2024-12-07T18:21:36,897 DEBUG [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:36,898 INFO [StoreOpener-5f25402e6c4eaa56d1d09719bc4c6a4c-1 {}] regionserver.HStore(327): Store=5f25402e6c4eaa56d1d09719bc4c6a4c/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T18:21:36,898 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:36,898 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,899 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,900 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T18:21:36,901 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1085): writing seq id for 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,902 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1102): Opened 5f25402e6c4eaa56d1d09719bc4c6a4c; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67518279, jitterRate=0.006100758910179138}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T18:21:36,903 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1001): Region open journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:36,904 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., pid=170, masterSystemTime=1733595696890 2024-12-07T18:21:36,905 DEBUG [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:36,905 INFO [RS_OPEN_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:36,905 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=5f25402e6c4eaa56d1d09719bc4c6a4c, regionState=OPEN, openSeqNum=5, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:36,907 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=168 2024-12-07T18:21:36,907 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=168, state=SUCCESS; OpenRegionProcedure 5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 in 168 msec 2024-12-07T18:21:36,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-07T18:21:36,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f25402e6c4eaa56d1d09719bc4c6a4c, REOPEN/MOVE in 482 msec 2024-12-07T18:21:36,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=167, resume processing ppid=166 2024-12-07T18:21:36,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, ppid=166, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 485 msec 2024-12-07T18:21:36,911 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 898 msec 2024-12-07T18:21:36,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=166 2024-12-07T18:21:36,913 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4bbf3c1c to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65aca2ac 2024-12-07T18:21:36,915 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c0f5004, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:36,916 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31f7586d to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@314e353d 2024-12-07T18:21:36,919 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1767dc60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:36,919 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4949adfa to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@96e8e33 2024-12-07T18:21:36,922 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20c3d7a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:36,923 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53ef82c4 
to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e8d919c 2024-12-07T18:21:36,925 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10cd3d28, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:36,926 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0672325a to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44b14279 2024-12-07T18:21:36,929 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d6c03ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:36,930 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6eb94416 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3395eba8 2024-12-07T18:21:36,932 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17f6ce8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:36,933 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3852b0e3 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2406c4ea 2024-12-07T18:21:36,938 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a5e441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:36,939 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4aa4b067 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58970c4d 2024-12-07T18:21:36,941 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@723a6cf2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:36,941 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1ca17819 to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cb4faa4 2024-12-07T18:21:36,944 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d48543c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:36,945 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x77a6a62c to 127.0.0.1:56016 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c1c03a6 2024-12-07T18:21:36,947 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@336a5bad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T18:21:36,953 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:36,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-12-07T18:21:36,954 DEBUG [hconnection-0x4cd06f2a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:36,954 DEBUG [hconnection-0x12dc2f25-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:36,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-07T18:21:36,954 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:36,955 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56840, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:36,955 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56846, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:36,955 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:36,955 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:36,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:36,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:21:36,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:36,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:36,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:36,964 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:36,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:36,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:36,968 DEBUG [hconnection-0x7f457114-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:36,969 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:36,977 DEBUG [hconnection-0x43760125-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:36,978 DEBUG [hconnection-0x12b5ffb2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:36,978 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:36,979 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56882, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:36,979 DEBUG [hconnection-0x3dbcdd3a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:36,980 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56890, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:36,983 DEBUG [hconnection-0x459a6511-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:36,985 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56900, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:36,986 DEBUG [hconnection-0x74581f15-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:36,986 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56906, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:36,988 DEBUG [hconnection-0xe24c877-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:36,988 DEBUG [hconnection-0x4a16d5a9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T18:21:36,989 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56918, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:36,989 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56920, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T18:21:37,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207712da428e42b45ce9740f14f6eece605_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595696962/Put/seqid=0 2024-12-07T18:21:37,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742392_1568 (size=12154) 2024-12-07T18:21:37,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595757032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595757032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595757032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595757034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595757035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-07T18:21:37,107 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-07T18:21:37,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:37,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:37,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:37,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:37,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595757136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595757136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595757136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595757136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595757137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-07T18:21:37,259 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-07T18:21:37,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:37,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:37,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:37,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:37,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595757338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595757339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595757340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595757340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595757340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,406 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:37,410 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207712da428e42b45ce9740f14f6eece605_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207712da428e42b45ce9740f14f6eece605_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:37,411 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cb04d5aa778f49578208f46061db7115, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:37,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cb04d5aa778f49578208f46061db7115 is 175, key is test_row_0/A:col10/1733595696962/Put/seqid=0 2024-12-07T18:21:37,412 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-07T18:21:37,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:37,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
as already flushing 2024-12-07T18:21:37,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:37,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742393_1569 (size=30955) 2024-12-07T18:21:37,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-07T18:21:37,565 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-07T18:21:37,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:37,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:37,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:37,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595757642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595757642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595757642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595757643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:37,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595757644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,718 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,718 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-07T18:21:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:37,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,817 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cb04d5aa778f49578208f46061db7115 2024-12-07T18:21:37,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/d6731a4249264d629fd908983943dcd2 is 50, key is test_row_0/B:col10/1733595696962/Put/seqid=0 2024-12-07T18:21:37,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742394_1570 (size=12001) 2024-12-07T18:21:37,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/d6731a4249264d629fd908983943dcd2 2024-12-07T18:21:37,871 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/46ea84b2eddf4a9186f0315d355e713c is 50, key is test_row_0/C:col10/1733595696962/Put/seqid=0 2024-12-07T18:21:37,871 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:37,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-07T18:21:37,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:37,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:37,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:37,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:37,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:37,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:37,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742395_1571 (size=12001) 2024-12-07T18:21:37,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/46ea84b2eddf4a9186f0315d355e713c 2024-12-07T18:21:37,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cb04d5aa778f49578208f46061db7115 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cb04d5aa778f49578208f46061db7115 2024-12-07T18:21:37,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cb04d5aa778f49578208f46061db7115, entries=150, sequenceid=15, filesize=30.2 K 2024-12-07T18:21:37,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/d6731a4249264d629fd908983943dcd2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d6731a4249264d629fd908983943dcd2 2024-12-07T18:21:37,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d6731a4249264d629fd908983943dcd2, entries=150, sequenceid=15, filesize=11.7 K 2024-12-07T18:21:37,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/46ea84b2eddf4a9186f0315d355e713c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/46ea84b2eddf4a9186f0315d355e713c 2024-12-07T18:21:37,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/46ea84b2eddf4a9186f0315d355e713c, entries=150, sequenceid=15, filesize=11.7 K 2024-12-07T18:21:37,896 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 932ms, sequenceid=15, compaction requested=false 2024-12-07T18:21:37,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:38,023 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,023 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172
2024-12-07T18:21:38,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.
2024-12-07T18:21:38,024 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-12-07T18:21:38,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A
2024-12-07T18:21:38,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:21:38,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B
2024-12-07T18:21:38,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:21:38,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C
2024-12-07T18:21:38,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-07T18:21:38,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207f4df79edb9cc46ef96ee59c1d7eea0b7_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595696989/Put/seqid=0
2024-12-07T18:21:38,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742396_1572 (size=12154)
2024-12-07T18:21:38,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:21:38,050 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207f4df79edb9cc46ef96ee59c1d7eea0b7_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207f4df79edb9cc46ef96ee59c1d7eea0b7_5f25402e6c4eaa56d1d09719bc4c6a4c
2024-12-07T18:21:38,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/9b297b8329f74d8393126ed0bbc7c9e5, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c]
2024-12-07T18:21:38,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/9b297b8329f74d8393126ed0bbc7c9e5 is 175, key is test_row_0/A:col10/1733595696989/Put/seqid=0
2024-12-07T18:21:38,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171
2024-12-07T18:21:38,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742397_1573 (size=30955)
2024-12-07T18:21:38,067 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/9b297b8329f74d8393126ed0bbc7c9e5
2024-12-07T18:21:38,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/a25c01571b36447ea79e7efc62f68863 is 50, key is test_row_0/B:col10/1733595696989/Put/seqid=0
2024-12-07T18:21:38,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742398_1574 (size=12001)
2024-12-07T18:21:38,079 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/a25c01571b36447ea79e7efc62f68863
2024-12-07T18:21:38,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/d014afffebc34df0a71c833d6bab987c is 50, key is test_row_0/C:col10/1733595696989/Put/seqid=0
2024-12-07T18:21:38,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742399_1575 (size=12001)
2024-12-07T18:21:38,094 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true),
to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/d014afffebc34df0a71c833d6bab987c
2024-12-07T18:21:38,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/9b297b8329f74d8393126ed0bbc7c9e5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/9b297b8329f74d8393126ed0bbc7c9e5
2024-12-07T18:21:38,103 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/9b297b8329f74d8393126ed0bbc7c9e5, entries=150, sequenceid=40, filesize=30.2 K
2024-12-07T18:21:38,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/a25c01571b36447ea79e7efc62f68863 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a25c01571b36447ea79e7efc62f68863
2024-12-07T18:21:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:21:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:21:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:21:38,107 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a25c01571b36447ea79e7efc62f68863, entries=150, sequenceid=40, filesize=11.7 K
2024-12-07T18:21:38,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:21:38,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T18:21:38,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/d014afffebc34df0a71c833d6bab987c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d014afffebc34df0a71c833d6bab987c 2024-12-07T18:21:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,111 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d014afffebc34df0a71c833d6bab987c, entries=150, sequenceid=40, filesize=11.7 K 2024-12-07T18:21:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:21:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,111 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 87ms, sequenceid=40, compaction requested=false 2024-12-07T18:21:38,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:38,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:38,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-07T18:21:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-07T18:21:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,114 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-07T18:21:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,114 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1580 sec 2024-12-07T18:21:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T18:21:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,115 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.1610 sec 2024-12-07T18:21:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,117 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,119 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,120 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,122 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,124 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,126 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,128 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,132 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,133 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,135 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,137 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,139 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,141 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,143 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,146 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,148 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,150 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,153 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,156 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,159 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,162 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:38,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-07T18:21:38,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:38,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:38,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:38,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:38,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,169 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,174 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,177 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412074b8e19d1057c410988a95c5a3429a791_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595698163/Put/seqid=0 2024-12-07T18:21:38,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742400_1576 (size=24358) 2024-12-07T18:21:38,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595758222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595758223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595758224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595758225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595758225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595758328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595758328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595758329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595758329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595758329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595758532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595758533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595758533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595758533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595758534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,590 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:38,593 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412074b8e19d1057c410988a95c5a3429a791_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074b8e19d1057c410988a95c5a3429a791_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:38,594 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/c89e5fc264ef473a90b2f69036233dd2, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:38,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/c89e5fc264ef473a90b2f69036233dd2 is 175, key is test_row_0/A:col10/1733595698163/Put/seqid=0 2024-12-07T18:21:38,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742401_1577 (size=73995) 2024-12-07T18:21:38,597 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=51, memsize=17.9 
K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/c89e5fc264ef473a90b2f69036233dd2 2024-12-07T18:21:38,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/7483f2ed5eae4603a82a443f9f95ea2f is 50, key is test_row_0/B:col10/1733595698163/Put/seqid=0 2024-12-07T18:21:38,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742402_1578 (size=12001) 2024-12-07T18:21:38,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595758834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595758836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595758837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595758837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:38,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595758837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/7483f2ed5eae4603a82a443f9f95ea2f 2024-12-07T18:21:39,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/6ffaa039f0d24c7b806dfe0afaab113d is 50, key is test_row_0/C:col10/1733595698163/Put/seqid=0 2024-12-07T18:21:39,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742403_1579 (size=12001) 2024-12-07T18:21:39,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/6ffaa039f0d24c7b806dfe0afaab113d 2024-12-07T18:21:39,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/c89e5fc264ef473a90b2f69036233dd2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/c89e5fc264ef473a90b2f69036233dd2 2024-12-07T18:21:39,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/c89e5fc264ef473a90b2f69036233dd2, entries=400, sequenceid=51, filesize=72.3 K 2024-12-07T18:21:39,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/7483f2ed5eae4603a82a443f9f95ea2f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7483f2ed5eae4603a82a443f9f95ea2f 2024-12-07T18:21:39,060 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-07T18:21:39,060 INFO [Thread-2516 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-07T18:21:39,062 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:39,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-12-07T18:21:39,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7483f2ed5eae4603a82a443f9f95ea2f, entries=150, sequenceid=51, filesize=11.7 K 2024-12-07T18:21:39,063 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-07T18:21:39,064 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:39,064 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:39,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/6ffaa039f0d24c7b806dfe0afaab113d as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6ffaa039f0d24c7b806dfe0afaab113d 2024-12-07T18:21:39,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6ffaa039f0d24c7b806dfe0afaab113d, entries=150, sequenceid=51, filesize=11.7 K 2024-12-07T18:21:39,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 919ms, sequenceid=51, compaction requested=true 2024-12-07T18:21:39,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:39,083 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:39,084 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 135905 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:39,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:39,084 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/A is initiating minor compaction (all files) 2024-12-07T18:21:39,084 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/A in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:39,084 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cb04d5aa778f49578208f46061db7115, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/9b297b8329f74d8393126ed0bbc7c9e5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/c89e5fc264ef473a90b2f69036233dd2] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=132.7 K 2024-12-07T18:21:39,084 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:39,084 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cb04d5aa778f49578208f46061db7115, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/9b297b8329f74d8393126ed0bbc7c9e5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/c89e5fc264ef473a90b2f69036233dd2] 2024-12-07T18:21:39,084 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb04d5aa778f49578208f46061db7115, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733595696956 2024-12-07T18:21:39,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:39,085 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:39,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:39,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:39,085 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b297b8329f74d8393126ed0bbc7c9e5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733595696989 2024-12-07T18:21:39,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:39,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:39,085 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting c89e5fc264ef473a90b2f69036233dd2, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595698153 2024-12-07T18:21:39,085 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:39,085 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/B is initiating minor compaction (all files) 2024-12-07T18:21:39,086 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/B in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
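The compaction-selection records above (SortedCompactionPolicy picking 3 of 3 eligible A-family files, ExploringCompactionPolicy reporting "3 files of size 135905 ... 1 permutations with 1 in ratio") follow HBase's size-ratio rule: a candidate set is acceptable only if no single file in it is larger than the combined size of the other files multiplied by the compaction ratio. The sketch below is a minimal, self-contained illustration of that test, not the actual ExploringCompactionPolicy code; the 30955-byte figures are reconstructed from the reported 30.2 K / 30.2 K / 72.3 K file sizes and the 135905-byte total, and 1.2 is the stock hbase.hstore.compaction.ratio default.

import java.util.Arrays;
import java.util.List;

/**
 * Simplified illustration of the size-ratio test behind "1 in ratio" above.
 * Not the HBase implementation; file sizes are reconstructed from this log.
 */
public class CompactionRatioSketch {

  // Every file in the candidate set must be <= ratio * (sum of the other files).
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // cb04d5aa... (~30.2 K), 9b297b83... (~30.2 K), c89e5fc2... (72.3 K); total 135905 bytes.
    List<Long> candidate = Arrays.asList(30_955L, 30_955L, 73_995L);
    double ratio = 1.2; // default hbase.hstore.compaction.ratio
    System.out.println("selected total = " + candidate.stream().mapToLong(Long::longValue).sum());
    System.out.println("in ratio       = " + filesInRatio(candidate, ratio)); // true -> all 3 files compacted
  }
}

With these sizes the largest file (73995 bytes) is just under 1.2 times the other two combined (61910 bytes), which is why the policy can take all three A-family files in a single minor compaction here.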
2024-12-07T18:21:39,086 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d6731a4249264d629fd908983943dcd2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a25c01571b36447ea79e7efc62f68863, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7483f2ed5eae4603a82a443f9f95ea2f] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=35.2 K 2024-12-07T18:21:39,086 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d6731a4249264d629fd908983943dcd2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733595696956 2024-12-07T18:21:39,087 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a25c01571b36447ea79e7efc62f68863, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733595696989 2024-12-07T18:21:39,087 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7483f2ed5eae4603a82a443f9f95ea2f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595698156 2024-12-07T18:21:39,096 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:39,106 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#B#compaction#496 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:39,107 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/936e13e7b0d542eaae019b0d29a21713 is 50, key is test_row_0/B:col10/1733595698163/Put/seqid=0 2024-12-07T18:21:39,109 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241207633e387718904929ba9b07c1bdbb89d0_5f25402e6c4eaa56d1d09719bc4c6a4c store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:39,111 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241207633e387718904929ba9b07c1bdbb89d0_5f25402e6c4eaa56d1d09719bc4c6a4c, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:39,111 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207633e387718904929ba9b07c1bdbb89d0_5f25402e6c4eaa56d1d09719bc4c6a4c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:39,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742405_1581 (size=12104) 2024-12-07T18:21:39,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742404_1580 (size=4469) 2024-12-07T18:21:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-07T18:21:39,215 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-07T18:21:39,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
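The RegionTooBusyException storm earlier in this log ("Over memstore limit=512.0 K", thrown from HRegion.checkResources) and the retries visible as the same 172.17.0.2:568xx connections returning with higher callIds and later deadlines are the expected behaviour while this flush is still in flight: a region rejects further writes once its memstore exceeds the blocking limit, which is the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier (default 4). The arithmetic below is a sketch only; the 128 K flush size is an assumption about this test's configuration, chosen because it reproduces the 512 K limit, and is not read from the test itself.

/**
 * Back-of-the-envelope check of the "Over memstore limit=512.0 K" figure in the
 * RegionTooBusyException records above. Assumes (not verified from the test config)
 * that the test lowers hbase.hregion.memstore.flush.size to 128 K and leaves
 * hbase.hregion.memstore.block.multiplier at its default of 4.
 */
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    long flushSizeBytes = 128L * 1024;   // assumed hbase.hregion.memstore.flush.size
    int blockMultiplier = 4;             // default hbase.hregion.memstore.block.multiplier
    long blockingLimit = flushSizeBytes * blockMultiplier;
    // Prints "blocking limit = 512.0 K", matching the exception message above.
    System.out.printf("blocking limit = %.1f K%n", blockingLimit / 1024.0);
    // Writes to the region are rejected with RegionTooBusyException while its memstore
    // is above this limit; they succeed again once the flush (pid=173/174) completes.
  }
}

Because RegionTooBusyException is a plain IOException rather than a DoNotRetryIOException, the client keeps retrying the Mutate calls, which is why the same connections reappear every few hundred milliseconds with fresh deadlines instead of failing outright.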
2024-12-07T18:21:39,216 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-07T18:21:39,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:39,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:39,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:39,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:39,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:39,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:39,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207b3e61a487378473d8e525dd7b47c6aef_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595698223/Put/seqid=0 2024-12-07T18:21:39,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742406_1582 (size=12154) 2024-12-07T18:21:39,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:39,264 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207b3e61a487378473d8e525dd7b47c6aef_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207b3e61a487378473d8e525dd7b47c6aef_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:39,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/a991a45a63164af3b4d442b1dcc4e9bb, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:39,273 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/a991a45a63164af3b4d442b1dcc4e9bb is 175, key is test_row_0/A:col10/1733595698223/Put/seqid=0 2024-12-07T18:21:39,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742407_1583 (size=30955) 2024-12-07T18:21:39,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:39,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:39,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595759347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595759348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595759348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595759352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595759352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-07T18:21:39,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595759453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595759453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595759453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595759457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595759457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,559 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#A#compaction#495 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:39,560 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/2920d64de05047c2a2b2e57943223259 is 175, key is test_row_0/A:col10/1733595698163/Put/seqid=0 2024-12-07T18:21:39,560 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/936e13e7b0d542eaae019b0d29a21713 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/936e13e7b0d542eaae019b0d29a21713 2024-12-07T18:21:39,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742408_1584 (size=31058) 2024-12-07T18:21:39,565 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/B of 5f25402e6c4eaa56d1d09719bc4c6a4c into 936e13e7b0d542eaae019b0d29a21713(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:39,566 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:39,566 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/B, priority=13, startTime=1733595699084; duration=0sec 2024-12-07T18:21:39,566 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:39,566 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:B 2024-12-07T18:21:39,566 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:39,568 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:39,568 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/C is initiating minor compaction (all files) 2024-12-07T18:21:39,568 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/C in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:39,568 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/46ea84b2eddf4a9186f0315d355e713c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d014afffebc34df0a71c833d6bab987c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6ffaa039f0d24c7b806dfe0afaab113d] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=35.2 K 2024-12-07T18:21:39,568 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 46ea84b2eddf4a9186f0315d355e713c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733595696956 2024-12-07T18:21:39,569 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d014afffebc34df0a71c833d6bab987c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733595696989 2024-12-07T18:21:39,569 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/2920d64de05047c2a2b2e57943223259 as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/2920d64de05047c2a2b2e57943223259 2024-12-07T18:21:39,570 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ffaa039f0d24c7b806dfe0afaab113d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595698156 2024-12-07T18:21:39,573 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/A of 5f25402e6c4eaa56d1d09719bc4c6a4c into 2920d64de05047c2a2b2e57943223259(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:39,573 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:39,573 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/A, priority=13, startTime=1733595699083; duration=0sec 2024-12-07T18:21:39,573 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:39,573 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:A 2024-12-07T18:21:39,578 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#C#compaction#498 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:39,578 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/a0ef0727a8734939aa7a7302c86250d5 is 50, key is test_row_0/C:col10/1733595698163/Put/seqid=0 2024-12-07T18:21:39,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742409_1585 (size=12104) 2024-12-07T18:21:39,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595759656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595759657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595759657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595759660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595759661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-07T18:21:39,678 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=76, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/a991a45a63164af3b4d442b1dcc4e9bb 2024-12-07T18:21:39,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/abf358dbc40e47238248cfc31d67b218 is 50, key is test_row_0/B:col10/1733595698223/Put/seqid=0 2024-12-07T18:21:39,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742410_1586 (size=12001) 2024-12-07T18:21:39,689 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/abf358dbc40e47238248cfc31d67b218 2024-12-07T18:21:39,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/714dee6695a74148807f1824ae64e5ec is 50, key is test_row_0/C:col10/1733595698223/Put/seqid=0 2024-12-07T18:21:39,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742411_1587 (size=12001) 2024-12-07T18:21:39,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595759959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595759960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595759960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595759961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595759965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:39,986 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/a0ef0727a8734939aa7a7302c86250d5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/a0ef0727a8734939aa7a7302c86250d5 2024-12-07T18:21:39,990 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/C of 5f25402e6c4eaa56d1d09719bc4c6a4c into a0ef0727a8734939aa7a7302c86250d5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:39,991 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:39,991 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/C, priority=13, startTime=1733595699085; duration=0sec 2024-12-07T18:21:39,991 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:39,991 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:C 2024-12-07T18:21:40,099 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/714dee6695a74148807f1824ae64e5ec 2024-12-07T18:21:40,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/a991a45a63164af3b4d442b1dcc4e9bb as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a991a45a63164af3b4d442b1dcc4e9bb 2024-12-07T18:21:40,109 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a991a45a63164af3b4d442b1dcc4e9bb, entries=150, sequenceid=76, filesize=30.2 K 2024-12-07T18:21:40,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/abf358dbc40e47238248cfc31d67b218 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/abf358dbc40e47238248cfc31d67b218 2024-12-07T18:21:40,113 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/abf358dbc40e47238248cfc31d67b218, entries=150, sequenceid=76, filesize=11.7 K 2024-12-07T18:21:40,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/714dee6695a74148807f1824ae64e5ec 
as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/714dee6695a74148807f1824ae64e5ec 2024-12-07T18:21:40,117 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/714dee6695a74148807f1824ae64e5ec, entries=150, sequenceid=76, filesize=11.7 K 2024-12-07T18:21:40,119 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 902ms, sequenceid=76, compaction requested=false 2024-12-07T18:21:40,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:40,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:40,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-12-07T18:21:40,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-12-07T18:21:40,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-07T18:21:40,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0560 sec 2024-12-07T18:21:40,124 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.0600 sec 2024-12-07T18:21:40,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-07T18:21:40,167 INFO [Thread-2516 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-07T18:21:40,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:40,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-12-07T18:21:40,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-07T18:21:40,170 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:40,170 INFO 
[PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:40,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:40,266 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T18:21:40,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-07T18:21:40,322 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-07T18:21:40,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:40,323 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-07T18:21:40,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:40,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:40,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:40,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:40,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:40,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:40,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207db1df40cd20441ac9bd28db6ecf37eef_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595699351/Put/seqid=0 2024-12-07T18:21:40,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to 
blk_1073742412_1588 (size=12154) 2024-12-07T18:21:40,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:40,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:40,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-07T18:21:40,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595760480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595760481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595760482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595760482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595760483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595760584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595760584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595760586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595760586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595760586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:40,742 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207db1df40cd20441ac9bd28db6ecf37eef_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207db1df40cd20441ac9bd28db6ecf37eef_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:40,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/d5f2740ace67477e9cb66863f36b4351, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:40,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/d5f2740ace67477e9cb66863f36b4351 is 175, key is test_row_0/A:col10/1733595699351/Put/seqid=0 2024-12-07T18:21:40,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742413_1589 (size=30955) 2024-12-07T18:21:40,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-07T18:21:40,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595760787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595760788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595760789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595760791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:40,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:40,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595760791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:41,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:41,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:41,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595761091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:41,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595761091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:41,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:41,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595761093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:41,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:41,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595761095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:41,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:41,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595761096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:41,149 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/d5f2740ace67477e9cb66863f36b4351 2024-12-07T18:21:41,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/a4040b6649d647a38b12c4b02ba8c7c0 is 50, key is test_row_0/B:col10/1733595699351/Put/seqid=0 2024-12-07T18:21:41,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742414_1590 (size=12001) 2024-12-07T18:21:41,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-07T18:21:41,561 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/a4040b6649d647a38b12c4b02ba8c7c0 2024-12-07T18:21:41,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/4fcb9b80e06d47c8b54819741b50bac5 is 50, key is test_row_0/C:col10/1733595699351/Put/seqid=0 2024-12-07T18:21:41,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742415_1591 (size=12001) 2024-12-07T18:21:41,578 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), 
to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/4fcb9b80e06d47c8b54819741b50bac5 2024-12-07T18:21:41,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/d5f2740ace67477e9cb66863f36b4351 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/d5f2740ace67477e9cb66863f36b4351 2024-12-07T18:21:41,593 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/d5f2740ace67477e9cb66863f36b4351, entries=150, sequenceid=91, filesize=30.2 K 2024-12-07T18:21:41,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/a4040b6649d647a38b12c4b02ba8c7c0 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a4040b6649d647a38b12c4b02ba8c7c0 2024-12-07T18:21:41,598 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a4040b6649d647a38b12c4b02ba8c7c0, entries=150, sequenceid=91, filesize=11.7 K 2024-12-07T18:21:41,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/4fcb9b80e06d47c8b54819741b50bac5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/4fcb9b80e06d47c8b54819741b50bac5 2024-12-07T18:21:41,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:41,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595761597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:41,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:41,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595761598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:41,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:41,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595761599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:41,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:41,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595761599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:41,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:41,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595761602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:41,604 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/4fcb9b80e06d47c8b54819741b50bac5, entries=150, sequenceid=91, filesize=11.7 K 2024-12-07T18:21:41,605 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 1282ms, sequenceid=91, compaction requested=true 2024-12-07T18:21:41,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:41,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:41,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-07T18:21:41,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-12-07T18:21:41,607 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-12-07T18:21:41,607 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4360 sec 2024-12-07T18:21:41,609 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 1.4400 sec 2024-12-07T18:21:42,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-07T18:21:42,274 INFO [Thread-2516 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-12-07T18:21:42,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:42,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-12-07T18:21:42,279 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:42,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-07T18:21:42,280 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:42,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:42,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-07T18:21:42,432 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-07T18:21:42,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:42,432 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-07T18:21:42,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:42,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:42,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:42,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:42,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:42,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:42,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207ddf2203d9703436aa7c827b6c7cf3c37_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595700481/Put/seqid=0 2024-12-07T18:21:42,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742416_1592 (size=12154) 2024-12-07T18:21:42,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-07T18:21:42,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:42,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:42,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595762609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595762610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595762610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595762611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595762612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595762714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595762715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595762715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595762715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:42,857 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207ddf2203d9703436aa7c827b6c7cf3c37_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207ddf2203d9703436aa7c827b6c7cf3c37_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:42,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/940983ba985b43a8b81db5d9f55cf12f, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:42,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/940983ba985b43a8b81db5d9f55cf12f is 175, key is test_row_0/A:col10/1733595700481/Put/seqid=0 2024-12-07T18:21:42,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742417_1593 (size=30955) 2024-12-07T18:21:42,870 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=116, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/940983ba985b43a8b81db5d9f55cf12f 2024-12-07T18:21:42,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/337ccbb959824c5e87965e68283265f2 is 50, key is test_row_0/B:col10/1733595700481/Put/seqid=0 2024-12-07T18:21:42,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-07T18:21:42,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742418_1594 (size=12001) 2024-12-07T18:21:42,887 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/337ccbb959824c5e87965e68283265f2 2024-12-07T18:21:42,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/f2e74ce4b1b4418592074c44c71e0155 is 50, key is test_row_0/C:col10/1733595700481/Put/seqid=0 2024-12-07T18:21:42,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742419_1595 (size=12001) 2024-12-07T18:21:42,898 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/f2e74ce4b1b4418592074c44c71e0155 2024-12-07T18:21:42,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/940983ba985b43a8b81db5d9f55cf12f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/940983ba985b43a8b81db5d9f55cf12f 2024-12-07T18:21:42,905 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/940983ba985b43a8b81db5d9f55cf12f, entries=150, sequenceid=116, filesize=30.2 K 2024-12-07T18:21:42,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/337ccbb959824c5e87965e68283265f2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/337ccbb959824c5e87965e68283265f2 2024-12-07T18:21:42,910 INFO 
[RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/337ccbb959824c5e87965e68283265f2, entries=150, sequenceid=116, filesize=11.7 K 2024-12-07T18:21:42,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/f2e74ce4b1b4418592074c44c71e0155 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/f2e74ce4b1b4418592074c44c71e0155 2024-12-07T18:21:42,915 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/f2e74ce4b1b4418592074c44c71e0155, entries=150, sequenceid=116, filesize=11.7 K 2024-12-07T18:21:42,916 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 484ms, sequenceid=116, compaction requested=true 2024-12-07T18:21:42,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:42,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
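For reference, the repeated RegionTooBusyException warnings in this stretch come from HRegion.checkResources rejecting puts while the region's memstore is above its blocking limit (512.0 K here; that limit is the region flush size multiplied by hbase.hregion.memstore.block.multiplier, presumably tuned small for this test). Below is a minimal sketch of how a writer might back off and retry if the exception escapes the client's built-in retries; the table, row, and family names mirror the log, and the backoff policy is a hypothetical example.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    // Retry a put with exponential backoff while the region reports it is over
    // its memstore blocking limit, giving the in-flight flush time to drain.
    static void putWithBackoff(Connection conn, Put put) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            long backoffMs = 100;
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
            throw new RuntimeException("Region stayed too busy after retries");
        }
    }

    // Hypothetical payload mirroring the keys visible in the log (test_row_0, A:col10).
    static Put examplePut() {
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        return put;
    }
}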
2024-12-07T18:21:42,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-07T18:21:42,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-12-07T18:21:42,920 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-12-07T18:21:42,920 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 638 msec 2024-12-07T18:21:42,921 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 643 msec 2024-12-07T18:21:42,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:42,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-07T18:21:42,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:42,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:42,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:42,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:42,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:42,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:42,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412074d9f0bdf286448199fb12b2da1aa6d25_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595702921/Put/seqid=0 2024-12-07T18:21:42,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742420_1596 (size=14694) 2024-12-07T18:21:42,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595762944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595762944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595762945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:42,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:42,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595762947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595763050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595763050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595763050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595763050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595763253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595763253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595763254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595763254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,334 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:43,338 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412074d9f0bdf286448199fb12b2da1aa6d25_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074d9f0bdf286448199fb12b2da1aa6d25_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:43,338 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/3fe45c37502f4d24a7a5573a11dc4679, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:43,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/3fe45c37502f4d24a7a5573a11dc4679 is 175, key is test_row_0/A:col10/1733595702921/Put/seqid=0 2024-12-07T18:21:43,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742421_1597 (size=39649) 2024-12-07T18:21:43,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-07T18:21:43,382 INFO [Thread-2516 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-12-07T18:21:43,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:43,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-12-07T18:21:43,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-07T18:21:43,385 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:43,386 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:43,386 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:43,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-07T18:21:43,537 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-07T18:21:43,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:43,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:43,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:43,538 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:43,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:43,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:43,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595763556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595763557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595763558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:43,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595763559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-07T18:21:43,690 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-07T18:21:43,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:43,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:43,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:43,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:43,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:43,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:43,743 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/3fe45c37502f4d24a7a5573a11dc4679 2024-12-07T18:21:43,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/d2ff4c9e98b64377a69b9db37c1246bd is 50, key is test_row_0/B:col10/1733595702921/Put/seqid=0 2024-12-07T18:21:43,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742422_1598 (size=12101) 2024-12-07T18:21:43,842 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-07T18:21:43,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:43,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:43,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:43,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:43,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:43,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:43,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-07T18:21:43,996 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:43,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-07T18:21:43,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:43,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:43,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:43,997 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:43,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:43,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:44,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:44,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595764060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:44,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:44,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595764062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:44,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:44,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595764062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:44,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:44,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595764063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:44,149 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:44,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-07T18:21:44,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:44,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:44,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:44,150 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:44,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:44,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:44,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/d2ff4c9e98b64377a69b9db37c1246bd 2024-12-07T18:21:44,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/ac356d7d5dd64cd4ba83435d0673f620 is 50, key is test_row_0/C:col10/1733595702921/Put/seqid=0 2024-12-07T18:21:44,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742423_1599 (size=12101) 2024-12-07T18:21:44,302 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:44,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-07T18:21:44,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:44,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:44,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:44,303 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:44,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:44,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:44,454 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:44,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-07T18:21:44,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:44,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:44,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:44,455 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:44,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:44,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:44,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-07T18:21:44,570 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/ac356d7d5dd64cd4ba83435d0673f620 2024-12-07T18:21:44,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/3fe45c37502f4d24a7a5573a11dc4679 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/3fe45c37502f4d24a7a5573a11dc4679 2024-12-07T18:21:44,577 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/3fe45c37502f4d24a7a5573a11dc4679, entries=200, sequenceid=130, filesize=38.7 K 2024-12-07T18:21:44,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/d2ff4c9e98b64377a69b9db37c1246bd as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d2ff4c9e98b64377a69b9db37c1246bd 2024-12-07T18:21:44,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d2ff4c9e98b64377a69b9db37c1246bd, entries=150, sequenceid=130, filesize=11.8 K 2024-12-07T18:21:44,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/ac356d7d5dd64cd4ba83435d0673f620 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/ac356d7d5dd64cd4ba83435d0673f620 2024-12-07T18:21:44,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/ac356d7d5dd64cd4ba83435d0673f620, entries=150, sequenceid=130, filesize=11.8 K 2024-12-07T18:21:44,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 1664ms, sequenceid=130, compaction requested=true 2024-12-07T18:21:44,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:44,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:A, priority=-2147483648, current under compaction store 
size is 1 2024-12-07T18:21:44,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:44,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:44,586 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-07T18:21:44,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:44,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:44,586 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-07T18:21:44,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:44,587 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60208 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-07T18:21:44,587 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/B is initiating minor compaction (all files) 2024-12-07T18:21:44,587 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/B in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:44,588 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/936e13e7b0d542eaae019b0d29a21713, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/abf358dbc40e47238248cfc31d67b218, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a4040b6649d647a38b12c4b02ba8c7c0, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/337ccbb959824c5e87965e68283265f2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d2ff4c9e98b64377a69b9db37c1246bd] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=58.8 K 2024-12-07T18:21:44,588 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 163572 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-07T18:21:44,588 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/A is initiating minor compaction (all files) 2024-12-07T18:21:44,588 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/A in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:44,588 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/2920d64de05047c2a2b2e57943223259, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a991a45a63164af3b4d442b1dcc4e9bb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/d5f2740ace67477e9cb66863f36b4351, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/940983ba985b43a8b81db5d9f55cf12f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/3fe45c37502f4d24a7a5573a11dc4679] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=159.7 K 2024-12-07T18:21:44,588 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:44,588 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/2920d64de05047c2a2b2e57943223259, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a991a45a63164af3b4d442b1dcc4e9bb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/d5f2740ace67477e9cb66863f36b4351, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/940983ba985b43a8b81db5d9f55cf12f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/3fe45c37502f4d24a7a5573a11dc4679] 2024-12-07T18:21:44,596 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 936e13e7b0d542eaae019b0d29a21713, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595698156 2024-12-07T18:21:44,596 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2920d64de05047c2a2b2e57943223259, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595698156 2024-12-07T18:21:44,596 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting abf358dbc40e47238248cfc31d67b218, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733595698223 2024-12-07T18:21:44,596 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting a991a45a63164af3b4d442b1dcc4e9bb, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733595698223 2024-12-07T18:21:44,596 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a4040b6649d647a38b12c4b02ba8c7c0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733595699347 2024-12-07T18:21:44,597 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5f2740ace67477e9cb66863f36b4351, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733595699347 2024-12-07T18:21:44,597 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 337ccbb959824c5e87965e68283265f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733595700478 2024-12-07T18:21:44,597 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 940983ba985b43a8b81db5d9f55cf12f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733595700478 2024-12-07T18:21:44,597 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d2ff4c9e98b64377a69b9db37c1246bd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733595702608 
2024-12-07T18:21:44,597 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fe45c37502f4d24a7a5573a11dc4679, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733595702608 2024-12-07T18:21:44,607 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:44,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-07T18:21:44,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:44,608 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-07T18:21:44,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:44,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:44,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:44,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:44,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:44,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:44,610 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#B#compaction#510 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:44,610 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/c3139bd6264e41018ba4ee05ffe84599 is 50, key is test_row_0/B:col10/1733595702921/Put/seqid=0 2024-12-07T18:21:44,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
as already flushing 2024-12-07T18:21:44,619 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:44,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:44,624 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241207bc223e07ecf644699d9c75e3fb24fa96_5f25402e6c4eaa56d1d09719bc4c6a4c store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:44,628 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241207bc223e07ecf644699d9c75e3fb24fa96_5f25402e6c4eaa56d1d09719bc4c6a4c, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:44,628 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207bc223e07ecf644699d9c75e3fb24fa96_5f25402e6c4eaa56d1d09719bc4c6a4c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:44,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742424_1600 (size=12375) 2024-12-07T18:21:44,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207d7ca01f0437e4ad7873deac29c1a335d_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595702943/Put/seqid=0 2024-12-07T18:21:44,638 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/c3139bd6264e41018ba4ee05ffe84599 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/c3139bd6264e41018ba4ee05ffe84599 2024-12-07T18:21:44,643 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/B of 5f25402e6c4eaa56d1d09719bc4c6a4c into c3139bd6264e41018ba4ee05ffe84599(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
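Editor's note: the compaction messages in this stretch ("Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking", the ExploringCompactionPolicy selection, and the PressureAwareThroughputController 50 MB/second limit) are governed by a handful of standard HBase settings. The sketch below names those keys for reference; the values are illustrative examples, and whether this test overrides any of them is not visible in the log.

```java
// Illustrative only: standard configuration keys behind the compaction-selection and
// throttling messages above. Values shown are examples, not the test's actual settings.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compactionThreshold", 3);   // minimum eligible files before a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // maximum files merged in one compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the log
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound",
        50L * 1024 * 1024);                               // ceiling for the pressure-aware throttle
    return conf;
  }
}
```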
2024-12-07T18:21:44,643 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:44,643 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/B, priority=11, startTime=1733595704586; duration=0sec 2024-12-07T18:21:44,643 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:44,643 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:B 2024-12-07T18:21:44,643 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-07T18:21:44,645 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60208 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-07T18:21:44,645 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/C is initiating minor compaction (all files) 2024-12-07T18:21:44,645 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/C in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:44,645 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/a0ef0727a8734939aa7a7302c86250d5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/714dee6695a74148807f1824ae64e5ec, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/4fcb9b80e06d47c8b54819741b50bac5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/f2e74ce4b1b4418592074c44c71e0155, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/ac356d7d5dd64cd4ba83435d0673f620] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=58.8 K 2024-12-07T18:21:44,645 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a0ef0727a8734939aa7a7302c86250d5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733595698156 2024-12-07T18:21:44,646 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 714dee6695a74148807f1824ae64e5ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733595698223 2024-12-07T18:21:44,646 DEBUG 
[RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fcb9b80e06d47c8b54819741b50bac5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733595699347 2024-12-07T18:21:44,646 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting f2e74ce4b1b4418592074c44c71e0155, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733595700478 2024-12-07T18:21:44,646 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting ac356d7d5dd64cd4ba83435d0673f620, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733595702608 2024-12-07T18:21:44,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742425_1601 (size=4469) 2024-12-07T18:21:44,652 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#A#compaction#511 average throughput is 0.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:44,654 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/98898ab44ce44822b5daacca49bde9fc is 175, key is test_row_0/A:col10/1733595702921/Put/seqid=0 2024-12-07T18:21:44,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:44,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595764658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:44,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742427_1603 (size=31329) 2024-12-07T18:21:44,666 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#C#compaction#513 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:44,666 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/c412f527a364432f8de8e9e12d1a8dc4 is 50, key is test_row_0/C:col10/1733595702921/Put/seqid=0 2024-12-07T18:21:44,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742426_1602 (size=12304) 2024-12-07T18:21:44,671 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/98898ab44ce44822b5daacca49bde9fc as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/98898ab44ce44822b5daacca49bde9fc 2024-12-07T18:21:44,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742428_1604 (size=12375) 2024-12-07T18:21:44,675 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/A of 5f25402e6c4eaa56d1d09719bc4c6a4c into 98898ab44ce44822b5daacca49bde9fc(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
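Editor's note: the repeated "Region is too busy ... Over memstore limit=512.0 K" warnings that begin here are write-side backpressure. HRegion.checkResources rejects puts with RegionTooBusyException while the region's memstore is above its blocking limit (the flush size multiplied by the block multiplier), and the client treats this as retriable; the RpcRetryingCallerImpl entry further down ("tries=6, retries=16") shows the client backing off and retrying. The sketch below names the relevant keys; the values are illustrative (a 128 K flush size with the default multiplier of 4 would yield the 512 K limit seen here), since the test's exact settings are not shown in the log.

```java
// Hedged sketch of the settings behind the memstore backpressure seen above.
// Values are illustrative, not necessarily what this test configured.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBackpressureExample {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // per-region flush threshold
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at flush.size * multiplier
    conf.setInt("hbase.client.retries.number", 16);                 // matches "retries=16" in RpcRetryingCallerImpl
    return conf;
  }
}
```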
2024-12-07T18:21:44,676 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:44,676 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/A, priority=11, startTime=1733595704585; duration=0sec 2024-12-07T18:21:44,676 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:44,676 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:A 2024-12-07T18:21:44,677 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/c412f527a364432f8de8e9e12d1a8dc4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/c412f527a364432f8de8e9e12d1a8dc4 2024-12-07T18:21:44,683 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/C of 5f25402e6c4eaa56d1d09719bc4c6a4c into c412f527a364432f8de8e9e12d1a8dc4(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:44,683 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:44,683 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/C, priority=11, startTime=1733595704586; duration=0sec 2024-12-07T18:21:44,683 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:44,683 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:C 2024-12-07T18:21:44,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:44,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595764762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:44,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:44,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595764964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:45,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:45,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:45,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595765069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:45,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:45,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595765070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:45,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:45,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595765070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:45,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:45,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595765071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:45,072 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207d7ca01f0437e4ad7873deac29c1a335d_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207d7ca01f0437e4ad7873deac29c1a335d_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:45,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/ba75643e4d6d4945bc8cf62200412279, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:45,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/ba75643e4d6d4945bc8cf62200412279 is 175, key is test_row_0/A:col10/1733595702943/Put/seqid=0 2024-12-07T18:21:45,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742429_1605 (size=31105) 2024-12-07T18:21:45,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:45,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595765266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:45,482 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=152, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/ba75643e4d6d4945bc8cf62200412279 2024-12-07T18:21:45,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-07T18:21:45,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/76f637d131ca46a5982122884c6f7af6 is 50, key is test_row_0/B:col10/1733595702943/Put/seqid=0 2024-12-07T18:21:45,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742430_1606 (size=12151) 2024-12-07T18:21:45,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:45,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595765772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:45,894 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/76f637d131ca46a5982122884c6f7af6 2024-12-07T18:21:45,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/12aec08048c6431f8232c444113eee3f is 50, key is test_row_0/C:col10/1733595702943/Put/seqid=0 2024-12-07T18:21:45,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742431_1607 (size=12151) 2024-12-07T18:21:45,907 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/12aec08048c6431f8232c444113eee3f 2024-12-07T18:21:45,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/ba75643e4d6d4945bc8cf62200412279 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ba75643e4d6d4945bc8cf62200412279 2024-12-07T18:21:45,915 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ba75643e4d6d4945bc8cf62200412279, entries=150, sequenceid=152, filesize=30.4 K 2024-12-07T18:21:45,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 
{event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/76f637d131ca46a5982122884c6f7af6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/76f637d131ca46a5982122884c6f7af6 2024-12-07T18:21:45,919 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/76f637d131ca46a5982122884c6f7af6, entries=150, sequenceid=152, filesize=11.9 K 2024-12-07T18:21:45,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/12aec08048c6431f8232c444113eee3f as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/12aec08048c6431f8232c444113eee3f 2024-12-07T18:21:45,924 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/12aec08048c6431f8232c444113eee3f, entries=150, sequenceid=152, filesize=11.9 K 2024-12-07T18:21:45,924 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 1316ms, sequenceid=152, compaction requested=false 2024-12-07T18:21:45,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:45,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
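Editor's note: the mobdir paths and the DefaultMobStoreFlusher/DefaultMobStoreCompactor entries above indicate that column family A is MOB-enabled in this test, which is why its flushes and compactions also manage files under mobdir. Below is a minimal, assumed sketch of how such a family is declared; the threshold value is purely illustrative.

```java
// Assumed sketch of a MOB-enabled column family like the "A" family handled by
// DefaultMobStoreFlusher/DefaultMobStoreCompactor above. The threshold is illustrative:
// cells larger than it are written through the MOB (mobdir) path.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static TableDescriptor descriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)    // route large cells through the MOB path
            .setMobThreshold(100L)  // illustrative threshold in bytes
            .build())
        .build();
  }
}
```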
2024-12-07T18:21:45,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-12-07T18:21:45,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-12-07T18:21:45,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-12-07T18:21:45,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5400 sec 2024-12-07T18:21:45,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 2.5450 sec 2024-12-07T18:21:46,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:46,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-07T18:21:46,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:46,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:46,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:46,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:46,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:46,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:46,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207e4b932202c3743a6b5fa684ea06b25b5_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595706777/Put/seqid=0 2024-12-07T18:21:46,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742432_1608 (size=12304) 2024-12-07T18:21:46,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:46,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595766833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:46,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:46,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595766936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:47,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:47,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595767074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:47,076 DEBUG [Thread-2508 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4129 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:21:47,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:47,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595767077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:47,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:47,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595767077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:47,078 DEBUG [Thread-2510 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4133 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:21:47,079 DEBUG [Thread-2514 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:21:47,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:47,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595767080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:47,082 DEBUG [Thread-2506 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:21:47,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:47,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595767140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:47,190 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:47,193 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207e4b932202c3743a6b5fa684ea06b25b5_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207e4b932202c3743a6b5fa684ea06b25b5_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:47,194 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/fc124231ce434cab9c80d219e8f22c15, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:47,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/fc124231ce434cab9c80d219e8f22c15 is 175, key is test_row_0/A:col10/1733595706777/Put/seqid=0 2024-12-07T18:21:47,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742433_1609 (size=31105) 2024-12-07T18:21:47,199 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=170, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/fc124231ce434cab9c80d219e8f22c15 2024-12-07T18:21:47,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/23a92ec039c64750b3102fa170594805 is 50, key is test_row_0/B:col10/1733595706777/Put/seqid=0 2024-12-07T18:21:47,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742434_1610 (size=12151) 2024-12-07T18:21:47,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:47,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595767444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:47,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-07T18:21:47,491 INFO [Thread-2516 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-12-07T18:21:47,492 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:47,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees 2024-12-07T18:21:47,494 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:47,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-07T18:21:47,494 
INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:47,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:47,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-07T18:21:47,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/23a92ec039c64750b3102fa170594805 2024-12-07T18:21:47,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/6c0f334859ed4c668e78c5c6930638f5 is 50, key is test_row_0/C:col10/1733595706777/Put/seqid=0 2024-12-07T18:21:47,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742435_1611 (size=12151) 2024-12-07T18:21:47,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/6c0f334859ed4c668e78c5c6930638f5 2024-12-07T18:21:47,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/fc124231ce434cab9c80d219e8f22c15 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/fc124231ce434cab9c80d219e8f22c15 2024-12-07T18:21:47,628 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/fc124231ce434cab9c80d219e8f22c15, entries=150, sequenceid=170, filesize=30.4 K 2024-12-07T18:21:47,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/23a92ec039c64750b3102fa170594805 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/23a92ec039c64750b3102fa170594805 2024-12-07T18:21:47,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/23a92ec039c64750b3102fa170594805, entries=150, sequenceid=170, filesize=11.9 K 2024-12-07T18:21:47,632 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/6c0f334859ed4c668e78c5c6930638f5 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6c0f334859ed4c668e78c5c6930638f5 2024-12-07T18:21:47,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6c0f334859ed4c668e78c5c6930638f5, entries=150, sequenceid=170, filesize=11.9 K 2024-12-07T18:21:47,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 858ms, sequenceid=170, compaction requested=true 2024-12-07T18:21:47,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:47,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:47,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:47,636 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:47,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:47,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:47,636 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:47,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:47,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:47,637 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93539 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:47,637 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/A is initiating minor compaction (all files) 2024-12-07T18:21:47,637 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/A in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:47,637 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/98898ab44ce44822b5daacca49bde9fc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ba75643e4d6d4945bc8cf62200412279, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/fc124231ce434cab9c80d219e8f22c15] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=91.3 K 2024-12-07T18:21:47,637 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:47,637 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/98898ab44ce44822b5daacca49bde9fc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ba75643e4d6d4945bc8cf62200412279, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/fc124231ce434cab9c80d219e8f22c15] 2024-12-07T18:21:47,638 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:47,638 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/B is initiating minor compaction (all files) 2024-12-07T18:21:47,638 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/B in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:47,638 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98898ab44ce44822b5daacca49bde9fc, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733595702608 2024-12-07T18:21:47,638 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/c3139bd6264e41018ba4ee05ffe84599, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/76f637d131ca46a5982122884c6f7af6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/23a92ec039c64750b3102fa170594805] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=35.8 K 2024-12-07T18:21:47,638 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba75643e4d6d4945bc8cf62200412279, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1733595702943 2024-12-07T18:21:47,638 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting c3139bd6264e41018ba4ee05ffe84599, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733595702608 2024-12-07T18:21:47,639 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc124231ce434cab9c80d219e8f22c15, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733595704647 2024-12-07T18:21:47,639 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 76f637d131ca46a5982122884c6f7af6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1733595702943 2024-12-07T18:21:47,639 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 23a92ec039c64750b3102fa170594805, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733595704647 2024-12-07T18:21:47,644 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:47,645 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#B#compaction#519 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:47,645 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/2291c604e00a46daa90e2e02bc067167 is 50, key is test_row_0/B:col10/1733595706777/Put/seqid=0 2024-12-07T18:21:47,646 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:47,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-07T18:21:47,646 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412077ba7cd3e1bbf484784175b7fcd8a24e5_5f25402e6c4eaa56d1d09719bc4c6a4c store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:47,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:47,646 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-07T18:21:47,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:47,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:47,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:47,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:47,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:47,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:47,648 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412077ba7cd3e1bbf484784175b7fcd8a24e5_5f25402e6c4eaa56d1d09719bc4c6a4c, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:47,649 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412077ba7cd3e1bbf484784175b7fcd8a24e5_5f25402e6c4eaa56d1d09719bc4c6a4c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:47,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742436_1612 (size=12527) 2024-12-07T18:21:47,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742437_1613 (size=4469) 2024-12-07T18:21:47,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120743d2d3433fa443e1894a26d310d99276_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595706832/Put/seqid=0 2024-12-07T18:21:47,684 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#A#compaction#520 average throughput is 0.61 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:47,685 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/a5f0da2864f84ee4b09d8ebf08d35511 is 175, key is test_row_0/A:col10/1733595706777/Put/seqid=0 2024-12-07T18:21:47,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742438_1614 (size=12304) 2024-12-07T18:21:47,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742439_1615 (size=31481) 2024-12-07T18:21:47,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-07T18:21:47,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:47,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:47,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:47,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595767981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:48,082 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/2291c604e00a46daa90e2e02bc067167 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/2291c604e00a46daa90e2e02bc067167 2024-12-07T18:21:48,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:48,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595768084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:48,086 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/B of 5f25402e6c4eaa56d1d09719bc4c6a4c into 2291c604e00a46daa90e2e02bc067167(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:48,086 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:48,086 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/B, priority=13, startTime=1733595707636; duration=0sec 2024-12-07T18:21:48,087 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:48,087 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:B 2024-12-07T18:21:48,087 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:48,088 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:48,088 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/C is initiating minor compaction (all files) 2024-12-07T18:21:48,088 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/C in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:48,088 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/c412f527a364432f8de8e9e12d1a8dc4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/12aec08048c6431f8232c444113eee3f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6c0f334859ed4c668e78c5c6930638f5] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=35.8 K 2024-12-07T18:21:48,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:48,089 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting c412f527a364432f8de8e9e12d1a8dc4, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733595702608 2024-12-07T18:21:48,089 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 12aec08048c6431f8232c444113eee3f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1733595702943 2024-12-07T18:21:48,089 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c0f334859ed4c668e78c5c6930638f5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733595704647 2024-12-07T18:21:48,091 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120743d2d3433fa443e1894a26d310d99276_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120743d2d3433fa443e1894a26d310d99276_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:48,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/ce6110bdee0a456facc0e57aed41cad7, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:48,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/ce6110bdee0a456facc0e57aed41cad7 is 175, key is test_row_0/A:col10/1733595706832/Put/seqid=0 2024-12-07T18:21:48,093 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/a5f0da2864f84ee4b09d8ebf08d35511 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a5f0da2864f84ee4b09d8ebf08d35511 2024-12-07T18:21:48,097 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/A of 5f25402e6c4eaa56d1d09719bc4c6a4c into a5f0da2864f84ee4b09d8ebf08d35511(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:48,097 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:48,097 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/A, priority=13, startTime=1733595707636; duration=0sec 2024-12-07T18:21:48,098 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:48,098 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:A 2024-12-07T18:21:48,098 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#C#compaction#522 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:48,098 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/2bad94f0637746f3b40096d79e5a29f7 is 50, key is test_row_0/C:col10/1733595706777/Put/seqid=0 2024-12-07T18:21:48,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-07T18:21:48,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742440_1616 (size=31105) 2024-12-07T18:21:48,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742441_1617 (size=12527) 2024-12-07T18:21:48,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:48,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595768288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:48,501 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=191, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/ce6110bdee0a456facc0e57aed41cad7 2024-12-07T18:21:48,508 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/2bad94f0637746f3b40096d79e5a29f7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/2bad94f0637746f3b40096d79e5a29f7 2024-12-07T18:21:48,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/1b948edc854c4153b1db4695c38e5878 is 50, key is test_row_0/B:col10/1733595706832/Put/seqid=0 2024-12-07T18:21:48,514 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/C of 5f25402e6c4eaa56d1d09719bc4c6a4c into 2bad94f0637746f3b40096d79e5a29f7(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:48,514 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:48,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742442_1618 (size=12151) 2024-12-07T18:21:48,514 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/C, priority=13, startTime=1733595707636; duration=0sec 2024-12-07T18:21:48,514 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:48,514 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:C 2024-12-07T18:21:48,515 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/1b948edc854c4153b1db4695c38e5878 2024-12-07T18:21:48,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/fc076d6c11824be5bef5e9cdb6998285 is 50, key is test_row_0/C:col10/1733595706832/Put/seqid=0 2024-12-07T18:21:48,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742443_1619 (size=12151) 2024-12-07T18:21:48,525 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/fc076d6c11824be5bef5e9cdb6998285 2024-12-07T18:21:48,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/ce6110bdee0a456facc0e57aed41cad7 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ce6110bdee0a456facc0e57aed41cad7 2024-12-07T18:21:48,532 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ce6110bdee0a456facc0e57aed41cad7, entries=150, sequenceid=191, filesize=30.4 K 2024-12-07T18:21:48,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 
{event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/1b948edc854c4153b1db4695c38e5878 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/1b948edc854c4153b1db4695c38e5878 2024-12-07T18:21:48,536 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/1b948edc854c4153b1db4695c38e5878, entries=150, sequenceid=191, filesize=11.9 K 2024-12-07T18:21:48,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/fc076d6c11824be5bef5e9cdb6998285 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/fc076d6c11824be5bef5e9cdb6998285 2024-12-07T18:21:48,539 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/fc076d6c11824be5bef5e9cdb6998285, entries=150, sequenceid=191, filesize=11.9 K 2024-12-07T18:21:48,540 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 894ms, sequenceid=191, compaction requested=false 2024-12-07T18:21:48,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:48,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:48,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-12-07T18:21:48,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=182 2024-12-07T18:21:48,542 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-12-07T18:21:48,542 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0470 sec 2024-12-07T18:21:48,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees in 1.0500 sec 2024-12-07T18:21:48,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:48,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-07T18:21:48,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:48,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:48,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:48,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:48,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:48,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:48,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-07T18:21:48,600 INFO [Thread-2516 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-12-07T18:21:48,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:48,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412073014fde487424510bf3d078aa2aaf3a0_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595707976/Put/seqid=0 2024-12-07T18:21:48,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees 2024-12-07T18:21:48,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-07T18:21:48,603 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=183, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:48,604 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:48,604 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:48,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742444_1620 (size=12304) 2024-12-07T18:21:48,607 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:48,610 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412073014fde487424510bf3d078aa2aaf3a0_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412073014fde487424510bf3d078aa2aaf3a0_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:48,611 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/30aeae916bba419ea625de5bb8856da0, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:48,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/30aeae916bba419ea625de5bb8856da0 is 175, key is test_row_0/A:col10/1733595707976/Put/seqid=0 2024-12-07T18:21:48,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742445_1621 (size=31105) 2024-12-07T18:21:48,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:48,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595768647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:48,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-07T18:21:48,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:48,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595768750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:48,756 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:48,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-07T18:21:48,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:48,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:48,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:48,757 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:48,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:48,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:48,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-07T18:21:48,909 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:48,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-07T18:21:48,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:48,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:48,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:48,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:48,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:48,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:48,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:48,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595768953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:49,016 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/30aeae916bba419ea625de5bb8856da0 2024-12-07T18:21:49,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/22299a8a62944ce0aec00bd14703edf0 is 50, key is test_row_0/B:col10/1733595707976/Put/seqid=0 2024-12-07T18:21:49,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742446_1622 (size=12151) 2024-12-07T18:21:49,065 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:49,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-07T18:21:49,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:49,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
as already flushing 2024-12-07T18:21:49,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:49,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:49,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:49,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:49,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-07T18:21:49,218 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:49,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-07T18:21:49,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:49,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:49,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:49,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:49,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:49,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:49,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:49,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595769256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:49,370 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:49,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-07T18:21:49,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:49,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:49,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:49,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:49,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:49,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:49,427 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/22299a8a62944ce0aec00bd14703edf0 2024-12-07T18:21:49,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/15be93039235462ab43ac04ea8970f7c is 50, key is test_row_0/C:col10/1733595707976/Put/seqid=0 2024-12-07T18:21:49,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742447_1623 (size=12151) 2024-12-07T18:21:49,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/15be93039235462ab43ac04ea8970f7c 2024-12-07T18:21:49,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/30aeae916bba419ea625de5bb8856da0 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/30aeae916bba419ea625de5bb8856da0 2024-12-07T18:21:49,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/30aeae916bba419ea625de5bb8856da0, entries=150, sequenceid=210, filesize=30.4 K 2024-12-07T18:21:49,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/22299a8a62944ce0aec00bd14703edf0 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/22299a8a62944ce0aec00bd14703edf0 2024-12-07T18:21:49,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/22299a8a62944ce0aec00bd14703edf0, entries=150, sequenceid=210, filesize=11.9 K 2024-12-07T18:21:49,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/15be93039235462ab43ac04ea8970f7c as 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/15be93039235462ab43ac04ea8970f7c 2024-12-07T18:21:49,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/15be93039235462ab43ac04ea8970f7c, entries=150, sequenceid=210, filesize=11.9 K 2024-12-07T18:21:49,466 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 872ms, sequenceid=210, compaction requested=true 2024-12-07T18:21:49,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:49,467 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:49,468 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93691 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:49,468 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/A is initiating minor compaction (all files) 2024-12-07T18:21:49,469 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/A in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:49,469 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a5f0da2864f84ee4b09d8ebf08d35511, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ce6110bdee0a456facc0e57aed41cad7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/30aeae916bba419ea625de5bb8856da0] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=91.5 K 2024-12-07T18:21:49,469 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:49,469 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a5f0da2864f84ee4b09d8ebf08d35511, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ce6110bdee0a456facc0e57aed41cad7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/30aeae916bba419ea625de5bb8856da0] 2024-12-07T18:21:49,469 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5f0da2864f84ee4b09d8ebf08d35511, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733595704647 2024-12-07T18:21:49,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:49,469 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce6110bdee0a456facc0e57aed41cad7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733595706821 2024-12-07T18:21:49,470 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30aeae916bba419ea625de5bb8856da0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733595707973 2024-12-07T18:21:49,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:49,471 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:49,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:49,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:49,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:49,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:49,472 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:49,472 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/B is initiating minor compaction (all files) 2024-12-07T18:21:49,473 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/B in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:49,473 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/2291c604e00a46daa90e2e02bc067167, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/1b948edc854c4153b1db4695c38e5878, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/22299a8a62944ce0aec00bd14703edf0] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=36.0 K 2024-12-07T18:21:49,473 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2291c604e00a46daa90e2e02bc067167, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733595704647 2024-12-07T18:21:49,473 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b948edc854c4153b1db4695c38e5878, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733595706821 2024-12-07T18:21:49,474 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 22299a8a62944ce0aec00bd14703edf0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733595707973 2024-12-07T18:21:49,478 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:49,480 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120703ad7603e5744a388bf3c40defeba315_5f25402e6c4eaa56d1d09719bc4c6a4c store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:49,482 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120703ad7603e5744a388bf3c40defeba315_5f25402e6c4eaa56d1d09719bc4c6a4c, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:49,482 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120703ad7603e5744a388bf3c40defeba315_5f25402e6c4eaa56d1d09719bc4c6a4c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:49,484 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#B#compaction#529 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:49,485 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/38e39d00fddb47468e347b0353df1c54 is 50, key is test_row_0/B:col10/1733595707976/Put/seqid=0 2024-12-07T18:21:49,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742448_1624 (size=4469) 2024-12-07T18:21:49,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742449_1625 (size=12629) 2024-12-07T18:21:49,496 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/38e39d00fddb47468e347b0353df1c54 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/38e39d00fddb47468e347b0353df1c54 2024-12-07T18:21:49,500 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/B of 5f25402e6c4eaa56d1d09719bc4c6a4c into 38e39d00fddb47468e347b0353df1c54(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:49,500 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:49,500 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/B, priority=13, startTime=1733595709471; duration=0sec 2024-12-07T18:21:49,500 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:49,500 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:B 2024-12-07T18:21:49,500 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:49,501 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:49,501 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/C is initiating minor compaction (all files) 2024-12-07T18:21:49,501 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/C in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:49,501 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/2bad94f0637746f3b40096d79e5a29f7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/fc076d6c11824be5bef5e9cdb6998285, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/15be93039235462ab43ac04ea8970f7c] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=36.0 K 2024-12-07T18:21:49,501 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bad94f0637746f3b40096d79e5a29f7, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733595704647 2024-12-07T18:21:49,502 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting fc076d6c11824be5bef5e9cdb6998285, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733595706821 2024-12-07T18:21:49,502 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 15be93039235462ab43ac04ea8970f7c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733595707973 2024-12-07T18:21:49,508 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#C#compaction#530 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:49,508 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/982210252b89436987597f996d5b3ddd is 50, key is test_row_0/C:col10/1733595707976/Put/seqid=0 2024-12-07T18:21:49,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742450_1626 (size=12629) 2024-12-07T18:21:49,515 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/982210252b89436987597f996d5b3ddd as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/982210252b89436987597f996d5b3ddd 2024-12-07T18:21:49,520 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/C of 5f25402e6c4eaa56d1d09719bc4c6a4c into 982210252b89436987597f996d5b3ddd(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:49,520 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:49,520 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/C, priority=13, startTime=1733595709471; duration=0sec 2024-12-07T18:21:49,520 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:49,520 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:C 2024-12-07T18:21:49,523 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:49,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-07T18:21:49,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:49,524 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-07T18:21:49,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:49,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:49,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:49,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:49,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:49,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:49,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207cd4135bdae9d42628ae7013a33a689e4_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595708636/Put/seqid=0 2024-12-07T18:21:49,533 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742451_1627 (size=12304) 2024-12-07T18:21:49,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-07T18:21:49,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:49,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:49,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:49,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595769800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:49,891 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#A#compaction#528 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:49,891 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cd3804e51f4548e9815fd3c409fcf832 is 175, key is test_row_0/A:col10/1733595707976/Put/seqid=0 2024-12-07T18:21:49,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742452_1628 (size=31583) 2024-12-07T18:21:49,898 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cd3804e51f4548e9815fd3c409fcf832 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cd3804e51f4548e9815fd3c409fcf832 2024-12-07T18:21:49,903 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/A of 5f25402e6c4eaa56d1d09719bc4c6a4c into cd3804e51f4548e9815fd3c409fcf832(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:49,903 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:49,903 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/A, priority=13, startTime=1733595709467; duration=0sec 2024-12-07T18:21:49,903 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:49,903 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:A 2024-12-07T18:21:49,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:49,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595769902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:49,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:49,937 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207cd4135bdae9d42628ae7013a33a689e4_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207cd4135bdae9d42628ae7013a33a689e4_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:49,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/38c4e20617b846e1a4e4773e629ae808, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:49,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/38c4e20617b846e1a4e4773e629ae808 is 175, key is test_row_0/A:col10/1733595708636/Put/seqid=0 2024-12-07T18:21:49,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742453_1629 (size=31105) 2024-12-07T18:21:50,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:50,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595770105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:50,343 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=232, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/38c4e20617b846e1a4e4773e629ae808 2024-12-07T18:21:50,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/7d47ff935db94ee4910f8d3dca8ff4ba is 50, key is test_row_0/B:col10/1733595708636/Put/seqid=0 2024-12-07T18:21:50,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742454_1630 (size=12151) 2024-12-07T18:21:50,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:50,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595770408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:50,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-07T18:21:50,753 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/7d47ff935db94ee4910f8d3dca8ff4ba 2024-12-07T18:21:50,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/967e331083f7412392cb677b1de87df2 is 50, key is test_row_0/C:col10/1733595708636/Put/seqid=0 2024-12-07T18:21:50,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742455_1631 (size=12151) 2024-12-07T18:21:50,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:50,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595770912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:51,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:51,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1733595771085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:51,086 DEBUG [Thread-2506 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:21:51,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:51,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56900 deadline: 1733595771086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:51,088 DEBUG [Thread-2508 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:21:51,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:51,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56846 deadline: 1733595771096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:51,098 DEBUG [Thread-2514 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:21:51,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:51,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56882 deadline: 1733595771103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:51,105 DEBUG [Thread-2510 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., hostname=8a7a030b35db,45237,1733595542335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T18:21:51,165 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/967e331083f7412392cb677b1de87df2 2024-12-07T18:21:51,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/38c4e20617b846e1a4e4773e629ae808 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/38c4e20617b846e1a4e4773e629ae808 2024-12-07T18:21:51,173 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/38c4e20617b846e1a4e4773e629ae808, entries=150, sequenceid=232, filesize=30.4 K 2024-12-07T18:21:51,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/7d47ff935db94ee4910f8d3dca8ff4ba as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7d47ff935db94ee4910f8d3dca8ff4ba 2024-12-07T18:21:51,178 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7d47ff935db94ee4910f8d3dca8ff4ba, entries=150, sequenceid=232, filesize=11.9 K 2024-12-07T18:21:51,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/967e331083f7412392cb677b1de87df2 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/967e331083f7412392cb677b1de87df2 2024-12-07T18:21:51,186 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/967e331083f7412392cb677b1de87df2, entries=150, sequenceid=232, filesize=11.9 K 2024-12-07T18:21:51,186 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 1662ms, sequenceid=232, compaction requested=false 2024-12-07T18:21:51,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:51,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:51,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-12-07T18:21:51,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=184 2024-12-07T18:21:51,189 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-12-07T18:21:51,189 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5840 sec 2024-12-07T18:21:51,190 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees in 2.5880 sec 2024-12-07T18:21:51,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:51,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-07T18:21:51,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:51,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:51,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:51,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:51,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:51,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:51,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207af566f1a92c341ed9eb5a600dc2a202f_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595711920/Put/seqid=0 
2024-12-07T18:21:51,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742456_1632 (size=14794) 2024-12-07T18:21:51,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:51,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595771966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:52,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:52,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595772069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:52,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595772272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:52,333 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:52,336 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207af566f1a92c341ed9eb5a600dc2a202f_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207af566f1a92c341ed9eb5a600dc2a202f_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:52,337 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/e968d5150c08481897312cc94acb5e75, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:52,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/e968d5150c08481897312cc94acb5e75 is 175, key is test_row_0/A:col10/1733595711920/Put/seqid=0 2024-12-07T18:21:52,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742457_1633 (size=39749) 2024-12-07T18:21:52,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:52,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595772576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:52,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-07T18:21:52,708 INFO [Thread-2516 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-12-07T18:21:52,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:52,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees 2024-12-07T18:21:52,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-07T18:21:52,711 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:52,711 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:52,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:52,742 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/e968d5150c08481897312cc94acb5e75 2024-12-07T18:21:52,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/d874f5ab572e487f94d9659a19af40b6 is 50, key is test_row_0/B:col10/1733595711920/Put/seqid=0 2024-12-07T18:21:52,752 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742458_1634 (size=12151) 2024-12-07T18:21:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-07T18:21:52,863 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:52,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-07T18:21:52,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:52,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:52,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:52,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:52,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:52,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
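The RegionTooBusyException entries above ("Over memstore limit=512.0 K") are the region server blocking writes until the in-flight flush drains the memstore. The sketch below shows one way a caller could back off and retry such a write; it is a hedged illustration (row, family, and backoff values are assumptions, and the stock HBase client normally retries this exception internally on its own), not the test's code.

// Illustrative retry loop, assuming the same table/row naming as the log.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
    static void putWithBackoff(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long pauseMs = 100;                      // assumed starting pause
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    return;                          // write accepted
                } catch (RegionTooBusyException e) {
                    // The region is refusing writes until its memstore flushes;
                    // back off and try again instead of failing immediately.
                    Thread.sleep(pauseMs);
                    pauseMs *= 2;
                }
            }
            throw new RuntimeException("region still too busy after retries");
        }
    }
}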
2024-12-07T18:21:53,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-07T18:21:53,016 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:53,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-07T18:21:53,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:53,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:53,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:53,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595773078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:53,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/d874f5ab572e487f94d9659a19af40b6 2024-12-07T18:21:53,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/d223591949ae4b12a7639d0248e89754 is 50, key is test_row_0/C:col10/1733595711920/Put/seqid=0 2024-12-07T18:21:53,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742459_1635 (size=12151) 2024-12-07T18:21:53,169 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:53,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-07T18:21:53,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:53,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:53,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
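The 512.0 K blocking threshold reported above is plausibly the per-region flush size multiplied by the memstore block multiplier, with a deliberately tiny flush size configured for this test. The snippet below only illustrates how those two real configuration keys relate; the concrete values are assumptions, not read from the test's configuration.

// Illustrative relationship between flush size and the write-blocking limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a region flush is triggered (assumed test-sized value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        // Writes block once the memstore reaches flush.size * block.multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes block above ~" + blockingLimit + " bytes"); // 512 KB with these values
    }
}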
2024-12-07T18:21:53,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-07T18:21:53,321 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:53,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-07T18:21:53,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:53,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:53,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:53,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,474 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:53,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-07T18:21:53,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:53,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:53,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:53,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:53,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/d223591949ae4b12a7639d0248e89754 2024-12-07T18:21:53,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/e968d5150c08481897312cc94acb5e75 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/e968d5150c08481897312cc94acb5e75 2024-12-07T18:21:53,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/e968d5150c08481897312cc94acb5e75, entries=200, sequenceid=250, filesize=38.8 K 2024-12-07T18:21:53,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/d874f5ab572e487f94d9659a19af40b6 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d874f5ab572e487f94d9659a19af40b6 2024-12-07T18:21:53,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d874f5ab572e487f94d9659a19af40b6, entries=150, 
sequenceid=250, filesize=11.9 K 2024-12-07T18:21:53,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/d223591949ae4b12a7639d0248e89754 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d223591949ae4b12a7639d0248e89754 2024-12-07T18:21:53,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d223591949ae4b12a7639d0248e89754, entries=150, sequenceid=250, filesize=11.9 K 2024-12-07T18:21:53,580 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 1659ms, sequenceid=250, compaction requested=true 2024-12-07T18:21:53,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:53,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:53,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:53,580 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:53,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:53,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:53,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:53,581 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:53,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:53,581 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:53,581 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102437 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:53,581 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] 
regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/B is initiating minor compaction (all files) 2024-12-07T18:21:53,581 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/A is initiating minor compaction (all files) 2024-12-07T18:21:53,581 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/B in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:53,581 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/A in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:53,581 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/38e39d00fddb47468e347b0353df1c54, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7d47ff935db94ee4910f8d3dca8ff4ba, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d874f5ab572e487f94d9659a19af40b6] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=36.1 K 2024-12-07T18:21:53,582 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cd3804e51f4548e9815fd3c409fcf832, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/38c4e20617b846e1a4e4773e629ae808, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/e968d5150c08481897312cc94acb5e75] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=100.0 K 2024-12-07T18:21:53,582 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:53,582 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cd3804e51f4548e9815fd3c409fcf832, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/38c4e20617b846e1a4e4773e629ae808, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/e968d5150c08481897312cc94acb5e75] 2024-12-07T18:21:53,582 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 38e39d00fddb47468e347b0353df1c54, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733595707973 2024-12-07T18:21:53,582 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d47ff935db94ee4910f8d3dca8ff4ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733595708636 2024-12-07T18:21:53,582 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd3804e51f4548e9815fd3c409fcf832, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733595707973 2024-12-07T18:21:53,582 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d874f5ab572e487f94d9659a19af40b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733595709787 2024-12-07T18:21:53,583 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38c4e20617b846e1a4e4773e629ae808, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733595708636 2024-12-07T18:21:53,583 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting e968d5150c08481897312cc94acb5e75, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733595709787 2024-12-07T18:21:53,590 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#B#compaction#537 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:53,590 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/a65a2985bdb3409cabab448cce75fb38 is 50, key is test_row_0/B:col10/1733595711920/Put/seqid=0 2024-12-07T18:21:53,591 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:53,593 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120741d9a77bd73a4b879e8060eab30d6b90_5f25402e6c4eaa56d1d09719bc4c6a4c store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:53,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742460_1636 (size=12731) 2024-12-07T18:21:53,596 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120741d9a77bd73a4b879e8060eab30d6b90_5f25402e6c4eaa56d1d09719bc4c6a4c, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:53,596 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120741d9a77bd73a4b879e8060eab30d6b90_5f25402e6c4eaa56d1d09719bc4c6a4c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:53,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742461_1637 (size=4469) 2024-12-07T18:21:53,600 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/a65a2985bdb3409cabab448cce75fb38 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a65a2985bdb3409cabab448cce75fb38 2024-12-07T18:21:53,601 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#A#compaction#538 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:53,602 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cf2020b344e04bf59f1df3d49a862155 is 175, key is test_row_0/A:col10/1733595711920/Put/seqid=0 2024-12-07T18:21:53,605 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/B of 5f25402e6c4eaa56d1d09719bc4c6a4c into a65a2985bdb3409cabab448cce75fb38(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:53,605 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:53,605 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/B, priority=13, startTime=1733595713580; duration=0sec 2024-12-07T18:21:53,605 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:53,605 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:B 2024-12-07T18:21:53,605 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:53,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742462_1638 (size=31685) 2024-12-07T18:21:53,606 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:53,607 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/C is initiating minor compaction (all files) 2024-12-07T18:21:53,607 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/C in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
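The mob.DefaultMobStoreCompactor and HMobStore entries in this run indicate that the A family of TestAcidGuarantees is MOB-enabled (note the "maximum MOB file size" setting and the MOB writer that is aborted when a compaction produces no MOB cells). For illustration only, here is a minimal, hypothetical sketch of declaring such a family through the public HBase 2.x client API; the table name MyMobTable and the 100 KB threshold are assumptions, not values taken from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Hypothetical MOB-enabled family named "A", as in the test table's layout.
          ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)       // cell values above the threshold are stored in MOB files
              .setMobThreshold(102400L)  // assumed 100 KB threshold, illustrative only
              .build();
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("MyMobTable"))  // hypothetical table name
              .setColumnFamily(mobFamily)
              .build());
        }
      }
    }
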
2024-12-07T18:21:53,607 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/982210252b89436987597f996d5b3ddd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/967e331083f7412392cb677b1de87df2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d223591949ae4b12a7639d0248e89754] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=36.1 K 2024-12-07T18:21:53,607 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 982210252b89436987597f996d5b3ddd, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733595707973 2024-12-07T18:21:53,607 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 967e331083f7412392cb677b1de87df2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733595708636 2024-12-07T18:21:53,608 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting d223591949ae4b12a7639d0248e89754, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733595709787 2024-12-07T18:21:53,612 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cf2020b344e04bf59f1df3d49a862155 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cf2020b344e04bf59f1df3d49a862155 2024-12-07T18:21:53,616 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#C#compaction#539 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:53,616 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/A of 5f25402e6c4eaa56d1d09719bc4c6a4c into cf2020b344e04bf59f1df3d49a862155(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:53,616 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:53,616 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/A, priority=13, startTime=1733595713580; duration=0sec 2024-12-07T18:21:53,617 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:53,617 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:A 2024-12-07T18:21:53,617 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/24794a8985814bc8a823e49789b0f7f1 is 50, key is test_row_0/C:col10/1733595711920/Put/seqid=0 2024-12-07T18:21:53,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742463_1639 (size=12731) 2024-12-07T18:21:53,626 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:53,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-07T18:21:53,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:53,627 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-07T18:21:53,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:53,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:53,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:53,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:53,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:53,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:53,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412079819badbaf244a799bb41409a8ad14cc_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595711948/Put/seqid=0 2024-12-07T18:21:53,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742464_1640 (size=12454) 2024-12-07T18:21:53,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-07T18:21:54,026 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/24794a8985814bc8a823e49789b0f7f1 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/24794a8985814bc8a823e49789b0f7f1 2024-12-07T18:21:54,030 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/C of 5f25402e6c4eaa56d1d09719bc4c6a4c into 24794a8985814bc8a823e49789b0f7f1(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
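The pid=185/186 entries above (FlushTableProcedure on the master, FlushRegionCallable and the RS_FLUSH_REGIONS handler on the region server) correspond to a table-level flush request. As a sketch only, such a request can be issued through the public Admin API as below; the connection setup is illustrative, and the master-side procedure that services it in this run is the FlushTableProcedure seen in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; in this run the
          // equivalent request is dispatched to the region server as pid=186.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
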
2024-12-07T18:21:54,030 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:54,030 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/C, priority=13, startTime=1733595713581; duration=0sec 2024-12-07T18:21:54,030 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:54,030 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:C 2024-12-07T18:21:54,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:54,058 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412079819badbaf244a799bb41409a8ad14cc_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412079819badbaf244a799bb41409a8ad14cc_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:54,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cafbfbc75eaf44d4bcca8986bdd52ee4, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:54,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cafbfbc75eaf44d4bcca8986bdd52ee4 is 175, key is test_row_0/A:col10/1733595711948/Put/seqid=0 2024-12-07T18:21:54,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742465_1641 (size=31255) 2024-12-07T18:21:54,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:54,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:54,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:54,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595774119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:54,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:54,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595774222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:54,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:54,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595774424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:54,474 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=271, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cafbfbc75eaf44d4bcca8986bdd52ee4 2024-12-07T18:21:54,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/a68e56c92007495ba0473afe86c4a9ac is 50, key is test_row_0/B:col10/1733595711948/Put/seqid=0 2024-12-07T18:21:54,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742466_1642 (size=12301) 2024-12-07T18:21:54,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:54,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595774728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:54,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-07T18:21:54,884 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/a68e56c92007495ba0473afe86c4a9ac 2024-12-07T18:21:54,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/5d155a0973cb45e0b9e73a4b9781befd is 50, key is test_row_0/C:col10/1733595711948/Put/seqid=0 2024-12-07T18:21:54,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742467_1643 (size=12301) 2024-12-07T18:21:54,898 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/5d155a0973cb45e0b9e73a4b9781befd 2024-12-07T18:21:54,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cafbfbc75eaf44d4bcca8986bdd52ee4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cafbfbc75eaf44d4bcca8986bdd52ee4 2024-12-07T18:21:54,905 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cafbfbc75eaf44d4bcca8986bdd52ee4, entries=150, sequenceid=271, filesize=30.5 K 2024-12-07T18:21:54,906 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/a68e56c92007495ba0473afe86c4a9ac as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a68e56c92007495ba0473afe86c4a9ac 2024-12-07T18:21:54,909 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a68e56c92007495ba0473afe86c4a9ac, entries=150, sequenceid=271, filesize=12.0 K 2024-12-07T18:21:54,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/5d155a0973cb45e0b9e73a4b9781befd as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5d155a0973cb45e0b9e73a4b9781befd 2024-12-07T18:21:54,913 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5d155a0973cb45e0b9e73a4b9781befd, entries=150, sequenceid=271, filesize=12.0 K 2024-12-07T18:21:54,914 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 1287ms, sequenceid=271, compaction requested=false 2024-12-07T18:21:54,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:54,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
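The repeated RegionTooBusyException warnings in this run ("Over memstore limit=512.0 K") show the region server rejecting writes while region 5f25402e6c4eaa56d1d09719bc4c6a4c is above its memstore limit and a flush is in flight; the test's writer threads keep retrying until the flush completes. The standard HBase client already retries such failures according to hbase.client.retries.number and hbase.client.pause, so the sketch below is illustrative only: a hypothetical caller that also backs off explicitly. The row, family, and qualifier names are taken from the test; the retry loop itself is an assumption, not part of this run:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          int attempts = 0;
          while (true) {
            try {
              table.put(put);  // the client library retries internally before this throws
              break;
            } catch (IOException e) {
              // e.g. a RegionTooBusyException surfaced once client-side retries are exhausted
              if (++attempts > 5) throw e;
              Thread.sleep(100L * attempts);  // simple linear backoff, illustrative only
            }
          }
        }
      }
    }
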
2024-12-07T18:21:54,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-07T18:21:54,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-07T18:21:54,916 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-12-07T18:21:54,916 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2040 sec 2024-12-07T18:21:54,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees in 2.2070 sec 2024-12-07T18:21:55,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:55,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-07T18:21:55,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:55,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:55,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:55,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:55,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:55,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:55,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207fa9e90f7bb3042ac92b5ab38ab83d510_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595714116/Put/seqid=0 2024-12-07T18:21:55,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742468_1644 (size=12454) 2024-12-07T18:21:55,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:55,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595775273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:55,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:55,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595775376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:55,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:55,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595775580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:55,649 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:55,653 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207fa9e90f7bb3042ac92b5ab38ab83d510_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207fa9e90f7bb3042ac92b5ab38ab83d510_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:55,654 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/03c52bcc8a954808b51a332e66503b03, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:55,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/03c52bcc8a954808b51a332e66503b03 is 175, key is test_row_0/A:col10/1733595714116/Put/seqid=0 2024-12-07T18:21:55,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742469_1645 (size=31255) 2024-12-07T18:21:55,659 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/03c52bcc8a954808b51a332e66503b03 2024-12-07T18:21:55,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/7139b9306f6941c7aa50bde5069cef44 is 50, key is test_row_0/B:col10/1733595714116/Put/seqid=0 2024-12-07T18:21:55,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742470_1646 (size=12301) 2024-12-07T18:21:55,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:55,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595775884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:56,070 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/7139b9306f6941c7aa50bde5069cef44 2024-12-07T18:21:56,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/502c64ffdc73402193274898f1a58535 is 50, key is test_row_0/C:col10/1733595714116/Put/seqid=0 2024-12-07T18:21:56,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742471_1647 (size=12301) 2024-12-07T18:21:56,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=290 (bloomFilter=true), 
to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/502c64ffdc73402193274898f1a58535 2024-12-07T18:21:56,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/03c52bcc8a954808b51a332e66503b03 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/03c52bcc8a954808b51a332e66503b03 2024-12-07T18:21:56,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/03c52bcc8a954808b51a332e66503b03, entries=150, sequenceid=290, filesize=30.5 K 2024-12-07T18:21:56,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/7139b9306f6941c7aa50bde5069cef44 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7139b9306f6941c7aa50bde5069cef44 2024-12-07T18:21:56,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7139b9306f6941c7aa50bde5069cef44, entries=150, sequenceid=290, filesize=12.0 K 2024-12-07T18:21:56,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/502c64ffdc73402193274898f1a58535 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/502c64ffdc73402193274898f1a58535 2024-12-07T18:21:56,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/502c64ffdc73402193274898f1a58535, entries=150, sequenceid=290, filesize=12.0 K 2024-12-07T18:21:56,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 881ms, sequenceid=290, compaction requested=true 2024-12-07T18:21:56,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:56,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:A, priority=-2147483648, current under compaction store size is 1 2024-12-07T18:21:56,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:56,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark 
for store 5f25402e6c4eaa56d1d09719bc4c6a4c:B, priority=-2147483648, current under compaction store size is 2 2024-12-07T18:21:56,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:56,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f25402e6c4eaa56d1d09719bc4c6a4c:C, priority=-2147483648, current under compaction store size is 3 2024-12-07T18:21:56,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:56,114 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:56,114 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:56,115 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:56,115 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/B is initiating minor compaction (all files) 2024-12-07T18:21:56,115 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/B in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:56,115 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a65a2985bdb3409cabab448cce75fb38, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a68e56c92007495ba0473afe86c4a9ac, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7139b9306f6941c7aa50bde5069cef44] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=36.5 K 2024-12-07T18:21:56,116 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a65a2985bdb3409cabab448cce75fb38, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733595709787 2024-12-07T18:21:56,116 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:56,116 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/A is initiating minor compaction (all files) 2024-12-07T18:21:56,116 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/A in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:56,116 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cf2020b344e04bf59f1df3d49a862155, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cafbfbc75eaf44d4bcca8986bdd52ee4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/03c52bcc8a954808b51a332e66503b03] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=92.0 K 2024-12-07T18:21:56,116 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:56,116 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
files: [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cf2020b344e04bf59f1df3d49a862155, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cafbfbc75eaf44d4bcca8986bdd52ee4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/03c52bcc8a954808b51a332e66503b03] 2024-12-07T18:21:56,117 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting a68e56c92007495ba0473afe86c4a9ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733595711948 2024-12-07T18:21:56,117 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf2020b344e04bf59f1df3d49a862155, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733595709787 2024-12-07T18:21:56,117 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting cafbfbc75eaf44d4bcca8986bdd52ee4, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733595711948 2024-12-07T18:21:56,117 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 7139b9306f6941c7aa50bde5069cef44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733595714116 2024-12-07T18:21:56,118 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03c52bcc8a954808b51a332e66503b03, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733595714116 2024-12-07T18:21:56,130 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#B#compaction#546 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:56,130 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/2752f229851149d98374be86bdcfdabc is 50, key is test_row_0/B:col10/1733595714116/Put/seqid=0 2024-12-07T18:21:56,132 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:56,137 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241207d6bf79b593eb4aa486457114c55bd9a4_5f25402e6c4eaa56d1d09719bc4c6a4c store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:56,138 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241207d6bf79b593eb4aa486457114c55bd9a4_5f25402e6c4eaa56d1d09719bc4c6a4c, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:56,139 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207d6bf79b593eb4aa486457114c55bd9a4_5f25402e6c4eaa56d1d09719bc4c6a4c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:56,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742473_1649 (size=4469) 2024-12-07T18:21:56,159 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#A#compaction#547 average throughput is 0.90 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:56,160 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/aa35d74631854e0e85b79a31a31b3023 is 175, key is test_row_0/A:col10/1733595714116/Put/seqid=0 2024-12-07T18:21:56,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742472_1648 (size=12983) 2024-12-07T18:21:56,176 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/2752f229851149d98374be86bdcfdabc as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/2752f229851149d98374be86bdcfdabc 2024-12-07T18:21:56,183 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/B of 5f25402e6c4eaa56d1d09719bc4c6a4c into 2752f229851149d98374be86bdcfdabc(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:56,183 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:56,183 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/B, priority=13, startTime=1733595716114; duration=0sec 2024-12-07T18:21:56,183 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-07T18:21:56,184 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:B 2024-12-07T18:21:56,184 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T18:21:56,185 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T18:21:56,185 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1540): 5f25402e6c4eaa56d1d09719bc4c6a4c/C is initiating minor compaction (all files) 2024-12-07T18:21:56,185 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f25402e6c4eaa56d1d09719bc4c6a4c/C in TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:21:56,185 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/24794a8985814bc8a823e49789b0f7f1, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5d155a0973cb45e0b9e73a4b9781befd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/502c64ffdc73402193274898f1a58535] into tmpdir=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp, totalSize=36.5 K 2024-12-07T18:21:56,186 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 24794a8985814bc8a823e49789b0f7f1, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733595709787 2024-12-07T18:21:56,186 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d155a0973cb45e0b9e73a4b9781befd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733595711948 2024-12-07T18:21:56,186 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] compactions.Compactor(224): Compacting 502c64ffdc73402193274898f1a58535, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733595714116 2024-12-07T18:21:56,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742474_1650 (size=31937) 2024-12-07T18:21:56,197 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/aa35d74631854e0e85b79a31a31b3023 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/aa35d74631854e0e85b79a31a31b3023 2024-12-07T18:21:56,201 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/A of 5f25402e6c4eaa56d1d09719bc4c6a4c into aa35d74631854e0e85b79a31a31b3023(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T18:21:56,201 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:56,201 INFO [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/A, priority=13, startTime=1733595716114; duration=0sec 2024-12-07T18:21:56,201 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:56,201 DEBUG [RS:0;8a7a030b35db:45237-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:A 2024-12-07T18:21:56,206 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f25402e6c4eaa56d1d09719bc4c6a4c#C#compaction#548 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T18:21:56,207 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/ea32e89844fb464585a3d714ca44f50c is 50, key is test_row_0/C:col10/1733595714116/Put/seqid=0 2024-12-07T18:21:56,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742475_1651 (size=12983) 2024-12-07T18:21:56,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:56,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-07T18:21:56,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:56,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:56,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:56,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:56,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:56,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:56,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412075270c6a3b1ae4b73b848c14438bf6e60_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595715267/Put/seqid=0 2024-12-07T18:21:56,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742476_1652 (size=12454) 
2024-12-07T18:21:56,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:56,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595776437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:56,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:56,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595776540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:56,619 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/ea32e89844fb464585a3d714ca44f50c as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/ea32e89844fb464585a3d714ca44f50c 2024-12-07T18:21:56,623 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f25402e6c4eaa56d1d09719bc4c6a4c/C of 5f25402e6c4eaa56d1d09719bc4c6a4c into ea32e89844fb464585a3d714ca44f50c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T18:21:56,623 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:56,623 INFO [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c., storeName=5f25402e6c4eaa56d1d09719bc4c6a4c/C, priority=13, startTime=1733595716114; duration=0sec 2024-12-07T18:21:56,623 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T18:21:56,623 DEBUG [RS:0;8a7a030b35db:45237-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f25402e6c4eaa56d1d09719bc4c6a4c:C 2024-12-07T18:21:56,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:56,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595776743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:56,804 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:56,807 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412075270c6a3b1ae4b73b848c14438bf6e60_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412075270c6a3b1ae4b73b848c14438bf6e60_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:56,808 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/947a1d256aa34a6b8f9567cd5137bfd3, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:56,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/947a1d256aa34a6b8f9567cd5137bfd3 is 175, key is test_row_0/A:col10/1733595715267/Put/seqid=0 2024-12-07T18:21:56,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742477_1653 (size=31255) 2024-12-07T18:21:56,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-07T18:21:56,816 INFO [Thread-2516 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-12-07T18:21:56,818 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-07T18:21:56,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees 
2024-12-07T18:21:56,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-07T18:21:56,819 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-07T18:21:56,820 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T18:21:56,820 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T18:21:56,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-07T18:21:56,971 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:56,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-07T18:21:56,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:56,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:56,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:56,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:56,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:56,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:56,982 DEBUG [Thread-2521 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4aa4b067 to 127.0.0.1:56016 2024-12-07T18:21:56,982 DEBUG [Thread-2521 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:56,983 DEBUG [Thread-2519 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3852b0e3 to 127.0.0.1:56016 2024-12-07T18:21:56,983 DEBUG [Thread-2523 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1ca17819 to 127.0.0.1:56016 2024-12-07T18:21:56,983 DEBUG [Thread-2523 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:56,983 DEBUG [Thread-2519 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:56,984 DEBUG [Thread-2517 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6eb94416 to 127.0.0.1:56016 2024-12-07T18:21:56,984 DEBUG [Thread-2517 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:56,985 DEBUG [Thread-2525 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77a6a62c to 127.0.0.1:56016 2024-12-07T18:21:56,985 DEBUG [Thread-2525 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:57,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:57,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595777046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:57,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-07T18:21:57,124 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:57,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-07T18:21:57,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:57,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:57,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,213 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=311, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/947a1d256aa34a6b8f9567cd5137bfd3 2024-12-07T18:21:57,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/1787ee47cd8a443eb2efc9a285dbdce9 is 50, key is test_row_0/B:col10/1733595715267/Put/seqid=0 2024-12-07T18:21:57,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742478_1654 (size=12301) 2024-12-07T18:21:57,276 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:57,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-07T18:21:57,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:57,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:57,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-07T18:21:57,428 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:57,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-07T18:21:57,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:57,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,429 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T18:21:57,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56920 deadline: 1733595777549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 2024-12-07T18:21:57,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:57,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-07T18:21:57,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:57,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,621 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/1787ee47cd8a443eb2efc9a285dbdce9 2024-12-07T18:21:57,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/5c963538c9b4495eb53d90a4c75af114 is 50, key is test_row_0/C:col10/1733595715267/Put/seqid=0 2024-12-07T18:21:57,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742479_1655 (size=12301) 2024-12-07T18:21:57,733 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:57,733 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-07T18:21:57,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
as already flushing 2024-12-07T18:21:57,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,733 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,885 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:57,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-07T18:21:57,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:57,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:57,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:57,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-07T18:21:58,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/5c963538c9b4495eb53d90a4c75af114 2024-12-07T18:21:58,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/947a1d256aa34a6b8f9567cd5137bfd3 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/947a1d256aa34a6b8f9567cd5137bfd3 2024-12-07T18:21:58,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/947a1d256aa34a6b8f9567cd5137bfd3, entries=150, sequenceid=311, filesize=30.5 K 2024-12-07T18:21:58,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/1787ee47cd8a443eb2efc9a285dbdce9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/1787ee47cd8a443eb2efc9a285dbdce9 2024-12-07T18:21:58,037 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
8a7a030b35db,45237,1733595542335 2024-12-07T18:21:58,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-07T18:21:58,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:58,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:58,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:58,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/1787ee47cd8a443eb2efc9a285dbdce9, entries=150, sequenceid=311, filesize=12.0 K 2024-12-07T18:21:58,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:58,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T18:21:58,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/5c963538c9b4495eb53d90a4c75af114 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5c963538c9b4495eb53d90a4c75af114 2024-12-07T18:21:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T18:21:58,041 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5c963538c9b4495eb53d90a4c75af114, entries=150, sequenceid=311, filesize=12.0 K 2024-12-07T18:21:58,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 1649ms, sequenceid=311, compaction requested=false 2024-12-07T18:21:58,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:58,190 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:21:58,190 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45237 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-07T18:21:58,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:58,190 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-07T18:21:58,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:21:58,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:58,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:21:58,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:58,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:21:58,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:21:58,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412071fb35bca3e524b64b7d3a9ff43419fff_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595716431/Put/seqid=0 2024-12-07T18:21:58,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742480_1656 (size=12454) 2024-12-07T18:21:58,555 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45237 {}] regionserver.HRegion(8581): Flush requested on 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:58,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. as already flushing 2024-12-07T18:21:58,555 DEBUG [Thread-2512 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53ef82c4 to 127.0.0.1:56016 2024-12-07T18:21:58,555 DEBUG [Thread-2512 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:21:58,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:21:58,601 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412071fb35bca3e524b64b7d3a9ff43419fff_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412071fb35bca3e524b64b7d3a9ff43419fff_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:21:58,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/c635d52c92784d02bb8d607e03d83bc4, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:21:58,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/c635d52c92784d02bb8d607e03d83bc4 is 175, key is test_row_0/A:col10/1733595716431/Put/seqid=0 2024-12-07T18:21:58,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742481_1657 (size=31255) 2024-12-07T18:21:58,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-07T18:21:59,006 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=329, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/c635d52c92784d02bb8d607e03d83bc4 2024-12-07T18:21:59,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/00cb2c30ac7544239b0e13770f2f918e is 50, key is 
test_row_0/B:col10/1733595716431/Put/seqid=0 2024-12-07T18:21:59,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742482_1658 (size=12301) 2024-12-07T18:21:59,415 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/00cb2c30ac7544239b0e13770f2f918e 2024-12-07T18:21:59,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/5ec7f5d1e48f4cde8c5708ab55069bd9 is 50, key is test_row_0/C:col10/1733595716431/Put/seqid=0 2024-12-07T18:21:59,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742483_1659 (size=12301) 2024-12-07T18:21:59,424 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/5ec7f5d1e48f4cde8c5708ab55069bd9 2024-12-07T18:21:59,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/c635d52c92784d02bb8d607e03d83bc4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/c635d52c92784d02bb8d607e03d83bc4 2024-12-07T18:21:59,430 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/c635d52c92784d02bb8d607e03d83bc4, entries=150, sequenceid=329, filesize=30.5 K 2024-12-07T18:21:59,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/00cb2c30ac7544239b0e13770f2f918e as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/00cb2c30ac7544239b0e13770f2f918e 2024-12-07T18:21:59,433 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/00cb2c30ac7544239b0e13770f2f918e, entries=150, sequenceid=329, filesize=12.0 K 
2024-12-07T18:21:59,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/5ec7f5d1e48f4cde8c5708ab55069bd9 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5ec7f5d1e48f4cde8c5708ab55069bd9 2024-12-07T18:21:59,436 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5ec7f5d1e48f4cde8c5708ab55069bd9, entries=150, sequenceid=329, filesize=12.0 K 2024-12-07T18:21:59,436 INFO [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=6.71 KB/6870 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 1246ms, sequenceid=329, compaction requested=true 2024-12-07T18:21:59,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2538): Flush status journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:21:59,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:21:59,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8a7a030b35db:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=188 2024-12-07T18:21:59,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster(4106): Remote procedure done, pid=188 2024-12-07T18:21:59,438 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=188, resume processing ppid=187 2024-12-07T18:21:59,438 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, ppid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6170 sec 2024-12-07T18:21:59,439 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees in 2.6200 sec 2024-12-07T18:22:00,640 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-07T18:22:00,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-07T18:22:00,923 INFO [Thread-2516 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 187 completed 2024-12-07T18:22:01,096 DEBUG [Thread-2508 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31f7586d to 127.0.0.1:56016 2024-12-07T18:22:01,096 DEBUG [Thread-2508 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:22:01,100 DEBUG [Thread-2506 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4bbf3c1c to 127.0.0.1:56016 2024-12-07T18:22:01,100 DEBUG [Thread-2506 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:22:01,132 DEBUG [Thread-2514 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0672325a to 127.0.0.1:56016 2024-12-07T18:22:01,132 DEBUG [Thread-2514 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:22:01,184 DEBUG [Thread-2510 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4949adfa to 127.0.0.1:56016 2024-12-07T18:22:01,184 DEBUG [Thread-2510 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 23 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 150 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6427 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6318 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6608 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6389 2024-12-07T18:22:01,184 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6320 2024-12-07T18:22:01,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-07T18:22:01,185 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-07T18:22:01,185 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6364386e to 127.0.0.1:56016 2024-12-07T18:22:01,185 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:22:01,185 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-07T18:22:01,185 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-07T18:22:01,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=189, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-07T18:22:01,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=189 2024-12-07T18:22:01,188 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595721187"}]},"ts":"1733595721187"} 2024-12-07T18:22:01,188 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-07T18:22:01,190 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-07T18:22:01,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-07T18:22:01,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=191, ppid=190, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f25402e6c4eaa56d1d09719bc4c6a4c, UNASSIGN}] 2024-12-07T18:22:01,192 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=191, ppid=190, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f25402e6c4eaa56d1d09719bc4c6a4c, UNASSIGN 2024-12-07T18:22:01,192 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=191 updating hbase:meta row=5f25402e6c4eaa56d1d09719bc4c6a4c, regionState=CLOSING, regionLocation=8a7a030b35db,45237,1733595542335 2024-12-07T18:22:01,193 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T18:22:01,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE; CloseRegionProcedure 5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335}] 2024-12-07T18:22:01,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-07T18:22:01,344 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8a7a030b35db,45237,1733595542335 2024-12-07T18:22:01,344 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(124): Close 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:01,344 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T18:22:01,344 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1681): Closing 5f25402e6c4eaa56d1d09719bc4c6a4c, disabling compactions & flushes 2024-12-07T18:22:01,345 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:22:01,345 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
2024-12-07T18:22:01,345 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. after waiting 0 ms 2024-12-07T18:22:01,345 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 2024-12-07T18:22:01,345 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(2837): Flushing 5f25402e6c4eaa56d1d09719bc4c6a4c 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-07T18:22:01,345 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=A 2024-12-07T18:22:01,345 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:22:01,345 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=B 2024-12-07T18:22:01,345 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:22:01,345 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5f25402e6c4eaa56d1d09719bc4c6a4c, store=C 2024-12-07T18:22:01,345 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-07T18:22:01,350 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207511de81b68de4374b7467f07936c58cf_5f25402e6c4eaa56d1d09719bc4c6a4c is 50, key is test_row_0/A:col10/1733595721131/Put/seqid=0 2024-12-07T18:22:01,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742484_1660 (size=9914) 2024-12-07T18:22:01,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-07T18:22:01,753 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T18:22:01,756 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241207511de81b68de4374b7467f07936c58cf_5f25402e6c4eaa56d1d09719bc4c6a4c to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207511de81b68de4374b7467f07936c58cf_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:01,756 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cf22918f29ed4292851f806eec725cfb, store: [table=TestAcidGuarantees family=A region=5f25402e6c4eaa56d1d09719bc4c6a4c] 2024-12-07T18:22:01,757 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cf22918f29ed4292851f806eec725cfb is 175, key is test_row_0/A:col10/1733595721131/Put/seqid=0 2024-12-07T18:22:01,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742485_1661 (size=22561) 2024-12-07T18:22:01,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-07T18:22:02,160 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=337, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cf22918f29ed4292851f806eec725cfb 2024-12-07T18:22:02,165 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/8623ee3dcd1d49bdb5cca56caced6d57 is 50, key is test_row_0/B:col10/1733595721131/Put/seqid=0 2024-12-07T18:22:02,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742486_1662 (size=9857) 2024-12-07T18:22:02,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-07T18:22:02,569 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/8623ee3dcd1d49bdb5cca56caced6d57 2024-12-07T18:22:02,574 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/89903543f67f4144b53b7b7b5131bd9b is 50, key is test_row_0/C:col10/1733595721131/Put/seqid=0 2024-12-07T18:22:02,577 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742487_1663 (size=9857) 2024-12-07T18:22:02,978 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/89903543f67f4144b53b7b7b5131bd9b 2024-12-07T18:22:02,981 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/A/cf22918f29ed4292851f806eec725cfb as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cf22918f29ed4292851f806eec725cfb 2024-12-07T18:22:02,984 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cf22918f29ed4292851f806eec725cfb, entries=100, sequenceid=337, filesize=22.0 K 2024-12-07T18:22:02,984 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/B/8623ee3dcd1d49bdb5cca56caced6d57 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/8623ee3dcd1d49bdb5cca56caced6d57 2024-12-07T18:22:02,986 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/8623ee3dcd1d49bdb5cca56caced6d57, entries=100, sequenceid=337, filesize=9.6 K 2024-12-07T18:22:02,987 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/.tmp/C/89903543f67f4144b53b7b7b5131bd9b as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/89903543f67f4144b53b7b7b5131bd9b 2024-12-07T18:22:02,989 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/89903543f67f4144b53b7b7b5131bd9b, entries=100, sequenceid=337, filesize=9.6 K 2024-12-07T18:22:02,990 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, 
heapSize ~88.59 KB/90720, currentSize=0 B/0 for 5f25402e6c4eaa56d1d09719bc4c6a4c in 1645ms, sequenceid=337, compaction requested=true 2024-12-07T18:22:02,990 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cb04d5aa778f49578208f46061db7115, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/9b297b8329f74d8393126ed0bbc7c9e5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/c89e5fc264ef473a90b2f69036233dd2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/2920d64de05047c2a2b2e57943223259, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a991a45a63164af3b4d442b1dcc4e9bb, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/d5f2740ace67477e9cb66863f36b4351, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/940983ba985b43a8b81db5d9f55cf12f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/3fe45c37502f4d24a7a5573a11dc4679, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/98898ab44ce44822b5daacca49bde9fc, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ba75643e4d6d4945bc8cf62200412279, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a5f0da2864f84ee4b09d8ebf08d35511, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/fc124231ce434cab9c80d219e8f22c15, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ce6110bdee0a456facc0e57aed41cad7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cd3804e51f4548e9815fd3c409fcf832, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/30aeae916bba419ea625de5bb8856da0, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/38c4e20617b846e1a4e4773e629ae808, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/e968d5150c08481897312cc94acb5e75, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cf2020b344e04bf59f1df3d49a862155, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cafbfbc75eaf44d4bcca8986bdd52ee4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/03c52bcc8a954808b51a332e66503b03] to archive 2024-12-07T18:22:02,991 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:22:02,992 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cb04d5aa778f49578208f46061db7115 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cb04d5aa778f49578208f46061db7115 2024-12-07T18:22:02,993 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/9b297b8329f74d8393126ed0bbc7c9e5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/9b297b8329f74d8393126ed0bbc7c9e5 2024-12-07T18:22:02,993 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/c89e5fc264ef473a90b2f69036233dd2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/c89e5fc264ef473a90b2f69036233dd2 2024-12-07T18:22:02,994 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/2920d64de05047c2a2b2e57943223259 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/2920d64de05047c2a2b2e57943223259 2024-12-07T18:22:02,995 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a991a45a63164af3b4d442b1dcc4e9bb to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a991a45a63164af3b4d442b1dcc4e9bb 2024-12-07T18:22:02,996 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/d5f2740ace67477e9cb66863f36b4351 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/d5f2740ace67477e9cb66863f36b4351 2024-12-07T18:22:02,996 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/940983ba985b43a8b81db5d9f55cf12f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/940983ba985b43a8b81db5d9f55cf12f 2024-12-07T18:22:02,997 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/3fe45c37502f4d24a7a5573a11dc4679 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/3fe45c37502f4d24a7a5573a11dc4679 2024-12-07T18:22:02,998 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/98898ab44ce44822b5daacca49bde9fc to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/98898ab44ce44822b5daacca49bde9fc 2024-12-07T18:22:02,998 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ba75643e4d6d4945bc8cf62200412279 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ba75643e4d6d4945bc8cf62200412279 2024-12-07T18:22:02,999 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a5f0da2864f84ee4b09d8ebf08d35511 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/a5f0da2864f84ee4b09d8ebf08d35511 2024-12-07T18:22:03,000 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/fc124231ce434cab9c80d219e8f22c15 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/fc124231ce434cab9c80d219e8f22c15 2024-12-07T18:22:03,001 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ce6110bdee0a456facc0e57aed41cad7 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/ce6110bdee0a456facc0e57aed41cad7 2024-12-07T18:22:03,001 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cd3804e51f4548e9815fd3c409fcf832 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cd3804e51f4548e9815fd3c409fcf832 2024-12-07T18:22:03,002 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/30aeae916bba419ea625de5bb8856da0 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/30aeae916bba419ea625de5bb8856da0 2024-12-07T18:22:03,003 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/38c4e20617b846e1a4e4773e629ae808 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/38c4e20617b846e1a4e4773e629ae808 2024-12-07T18:22:03,003 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/e968d5150c08481897312cc94acb5e75 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/e968d5150c08481897312cc94acb5e75 2024-12-07T18:22:03,004 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cf2020b344e04bf59f1df3d49a862155 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cf2020b344e04bf59f1df3d49a862155 2024-12-07T18:22:03,005 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cafbfbc75eaf44d4bcca8986bdd52ee4 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cafbfbc75eaf44d4bcca8986bdd52ee4 2024-12-07T18:22:03,006 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/03c52bcc8a954808b51a332e66503b03 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/03c52bcc8a954808b51a332e66503b03 2024-12-07T18:22:03,007 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d6731a4249264d629fd908983943dcd2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a25c01571b36447ea79e7efc62f68863, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/936e13e7b0d542eaae019b0d29a21713, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7483f2ed5eae4603a82a443f9f95ea2f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/abf358dbc40e47238248cfc31d67b218, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a4040b6649d647a38b12c4b02ba8c7c0, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/337ccbb959824c5e87965e68283265f2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/c3139bd6264e41018ba4ee05ffe84599, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d2ff4c9e98b64377a69b9db37c1246bd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/76f637d131ca46a5982122884c6f7af6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/2291c604e00a46daa90e2e02bc067167, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/23a92ec039c64750b3102fa170594805, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/1b948edc854c4153b1db4695c38e5878, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/38e39d00fddb47468e347b0353df1c54, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/22299a8a62944ce0aec00bd14703edf0, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7d47ff935db94ee4910f8d3dca8ff4ba, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a65a2985bdb3409cabab448cce75fb38, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d874f5ab572e487f94d9659a19af40b6, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a68e56c92007495ba0473afe86c4a9ac, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7139b9306f6941c7aa50bde5069cef44] to archive 2024-12-07T18:22:03,008 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
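For reference, the archive destinations in the HFileArchiver entries here follow one fixed shape: each compacted store file is moved from <root>/data/default/<table>/<region>/<family>/<file> to the same suffix under <root>/archive/data/. The following is a minimal sketch of that path mapping under that assumption; it uses only the Hadoop Path class and is a hypothetical helper, not HBase's HFileArchiver implementation.

import org.apache.hadoop.fs.Path;

/**
 * Hypothetical helper: mirrors a store-file path from the table data
 * directory into the archive directory under the same HBase root, i.e. the
 * mapping visible in the "Archived from FileableStoreFile, ... to ..."
 * entries in this log. Not HBase's actual HFileArchiver code.
 */
public final class ArchivePathSketch {

  static Path toArchivePath(Path hbaseRootDir, Path storeFilePath) {
    String root = hbaseRootDir.toString();
    String file = storeFilePath.toString();
    if (!file.startsWith(root + "/data/")) {
      throw new IllegalArgumentException("store file is not under the data directory");
    }
    // Keep the table/region/family/file suffix and re-root it under archive/data/.
    String suffix = file.substring((root + "/data/").length());
    return new Path(root, "archive/data/" + suffix);
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7");
    Path store = new Path(root, "data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d6731a4249264d629fd908983943dcd2");
    // Prints the same archive path that the first B-family entry below records.
    System.out.println(toArchivePath(root, store));
  }
}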
2024-12-07T18:22:03,009 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d6731a4249264d629fd908983943dcd2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d6731a4249264d629fd908983943dcd2 2024-12-07T18:22:03,009 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a25c01571b36447ea79e7efc62f68863 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a25c01571b36447ea79e7efc62f68863 2024-12-07T18:22:03,010 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/936e13e7b0d542eaae019b0d29a21713 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/936e13e7b0d542eaae019b0d29a21713 2024-12-07T18:22:03,011 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7483f2ed5eae4603a82a443f9f95ea2f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7483f2ed5eae4603a82a443f9f95ea2f 2024-12-07T18:22:03,012 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/abf358dbc40e47238248cfc31d67b218 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/abf358dbc40e47238248cfc31d67b218 2024-12-07T18:22:03,012 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a4040b6649d647a38b12c4b02ba8c7c0 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a4040b6649d647a38b12c4b02ba8c7c0 2024-12-07T18:22:03,013 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/337ccbb959824c5e87965e68283265f2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/337ccbb959824c5e87965e68283265f2 2024-12-07T18:22:03,014 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/c3139bd6264e41018ba4ee05ffe84599 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/c3139bd6264e41018ba4ee05ffe84599 2024-12-07T18:22:03,015 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d2ff4c9e98b64377a69b9db37c1246bd to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d2ff4c9e98b64377a69b9db37c1246bd 2024-12-07T18:22:03,015 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/76f637d131ca46a5982122884c6f7af6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/76f637d131ca46a5982122884c6f7af6 2024-12-07T18:22:03,016 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/2291c604e00a46daa90e2e02bc067167 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/2291c604e00a46daa90e2e02bc067167 2024-12-07T18:22:03,017 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/23a92ec039c64750b3102fa170594805 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/23a92ec039c64750b3102fa170594805 2024-12-07T18:22:03,018 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/1b948edc854c4153b1db4695c38e5878 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/1b948edc854c4153b1db4695c38e5878 2024-12-07T18:22:03,019 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/38e39d00fddb47468e347b0353df1c54 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/38e39d00fddb47468e347b0353df1c54 2024-12-07T18:22:03,019 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/22299a8a62944ce0aec00bd14703edf0 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/22299a8a62944ce0aec00bd14703edf0 2024-12-07T18:22:03,020 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7d47ff935db94ee4910f8d3dca8ff4ba to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7d47ff935db94ee4910f8d3dca8ff4ba 2024-12-07T18:22:03,021 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a65a2985bdb3409cabab448cce75fb38 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a65a2985bdb3409cabab448cce75fb38 2024-12-07T18:22:03,022 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d874f5ab572e487f94d9659a19af40b6 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/d874f5ab572e487f94d9659a19af40b6 2024-12-07T18:22:03,022 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a68e56c92007495ba0473afe86c4a9ac to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/a68e56c92007495ba0473afe86c4a9ac 2024-12-07T18:22:03,023 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7139b9306f6941c7aa50bde5069cef44 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/7139b9306f6941c7aa50bde5069cef44 2024-12-07T18:22:03,024 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/46ea84b2eddf4a9186f0315d355e713c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d014afffebc34df0a71c833d6bab987c, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/a0ef0727a8734939aa7a7302c86250d5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6ffaa039f0d24c7b806dfe0afaab113d, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/714dee6695a74148807f1824ae64e5ec, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/4fcb9b80e06d47c8b54819741b50bac5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/f2e74ce4b1b4418592074c44c71e0155, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/c412f527a364432f8de8e9e12d1a8dc4, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/ac356d7d5dd64cd4ba83435d0673f620, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/12aec08048c6431f8232c444113eee3f, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/2bad94f0637746f3b40096d79e5a29f7, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6c0f334859ed4c668e78c5c6930638f5, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/fc076d6c11824be5bef5e9cdb6998285, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/982210252b89436987597f996d5b3ddd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/15be93039235462ab43ac04ea8970f7c, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/967e331083f7412392cb677b1de87df2, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/24794a8985814bc8a823e49789b0f7f1, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d223591949ae4b12a7639d0248e89754, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5d155a0973cb45e0b9e73a4b9781befd, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/502c64ffdc73402193274898f1a58535] to archive 2024-12-07T18:22:03,025 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T18:22:03,026 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/46ea84b2eddf4a9186f0315d355e713c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/46ea84b2eddf4a9186f0315d355e713c 2024-12-07T18:22:03,027 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d014afffebc34df0a71c833d6bab987c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d014afffebc34df0a71c833d6bab987c 2024-12-07T18:22:03,027 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/a0ef0727a8734939aa7a7302c86250d5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/a0ef0727a8734939aa7a7302c86250d5 2024-12-07T18:22:03,028 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6ffaa039f0d24c7b806dfe0afaab113d to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6ffaa039f0d24c7b806dfe0afaab113d 2024-12-07T18:22:03,029 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/714dee6695a74148807f1824ae64e5ec to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/714dee6695a74148807f1824ae64e5ec 2024-12-07T18:22:03,030 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/4fcb9b80e06d47c8b54819741b50bac5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/4fcb9b80e06d47c8b54819741b50bac5 2024-12-07T18:22:03,030 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/f2e74ce4b1b4418592074c44c71e0155 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/f2e74ce4b1b4418592074c44c71e0155 2024-12-07T18:22:03,031 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/c412f527a364432f8de8e9e12d1a8dc4 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/c412f527a364432f8de8e9e12d1a8dc4 2024-12-07T18:22:03,032 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/ac356d7d5dd64cd4ba83435d0673f620 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/ac356d7d5dd64cd4ba83435d0673f620 2024-12-07T18:22:03,033 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/12aec08048c6431f8232c444113eee3f to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/12aec08048c6431f8232c444113eee3f 2024-12-07T18:22:03,034 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/2bad94f0637746f3b40096d79e5a29f7 to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/2bad94f0637746f3b40096d79e5a29f7 2024-12-07T18:22:03,034 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6c0f334859ed4c668e78c5c6930638f5 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/6c0f334859ed4c668e78c5c6930638f5 2024-12-07T18:22:03,035 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/fc076d6c11824be5bef5e9cdb6998285 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/fc076d6c11824be5bef5e9cdb6998285 2024-12-07T18:22:03,036 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/982210252b89436987597f996d5b3ddd to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/982210252b89436987597f996d5b3ddd 2024-12-07T18:22:03,037 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/15be93039235462ab43ac04ea8970f7c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/15be93039235462ab43ac04ea8970f7c 2024-12-07T18:22:03,037 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/967e331083f7412392cb677b1de87df2 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/967e331083f7412392cb677b1de87df2 2024-12-07T18:22:03,038 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/24794a8985814bc8a823e49789b0f7f1 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/24794a8985814bc8a823e49789b0f7f1 2024-12-07T18:22:03,039 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d223591949ae4b12a7639d0248e89754 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/d223591949ae4b12a7639d0248e89754 2024-12-07T18:22:03,040 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5d155a0973cb45e0b9e73a4b9781befd to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5d155a0973cb45e0b9e73a4b9781befd 2024-12-07T18:22:03,041 DEBUG [StoreCloser-TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/502c64ffdc73402193274898f1a58535 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/502c64ffdc73402193274898f1a58535 2024-12-07T18:22:03,044 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/recovered.edits/340.seqid, newMaxSeqId=340, maxSeqId=4 2024-12-07T18:22:03,044 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c. 
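The recovered.edits marker written just above ("Wrote file=.../recovered.edits/340.seqid, newMaxSeqId=340") encodes the region's highest sequence id directly in the file name. The snippet below only illustrates that naming convention under that assumption; it is a hypothetical parser, not HBase's WALSplitUtil code.

public final class SeqIdMarkerSketch {
  // "340.seqid" -> 340; hypothetical parser for the marker name seen above.
  static long maxSeqIdFromMarker(String fileName) {
    if (!fileName.endsWith(".seqid")) {
      throw new IllegalArgumentException("not a seqid marker: " + fileName);
    }
    return Long.parseLong(fileName.substring(0, fileName.length() - ".seqid".length()));
  }

  public static void main(String[] args) {
    System.out.println(maxSeqIdFromMarker("340.seqid")); // 340, matching newMaxSeqId=340 above
  }
}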
2024-12-07T18:22:03,044 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1635): Region close journal for 5f25402e6c4eaa56d1d09719bc4c6a4c: 2024-12-07T18:22:03,045 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(170): Closed 5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,046 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=191 updating hbase:meta row=5f25402e6c4eaa56d1d09719bc4c6a4c, regionState=CLOSED 2024-12-07T18:22:03,048 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-07T18:22:03,048 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseRegionProcedure 5f25402e6c4eaa56d1d09719bc4c6a4c, server=8a7a030b35db,45237,1733595542335 in 1.8540 sec 2024-12-07T18:22:03,049 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=191, resume processing ppid=190 2024-12-07T18:22:03,049 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, ppid=190, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5f25402e6c4eaa56d1d09719bc4c6a4c, UNASSIGN in 1.8570 sec 2024-12-07T18:22:03,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=189 2024-12-07T18:22:03,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=189, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8590 sec 2024-12-07T18:22:03,051 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733595723051"}]},"ts":"1733595723051"} 2024-12-07T18:22:03,051 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-07T18:22:03,053 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-07T18:22:03,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8680 sec 2024-12-07T18:22:03,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-07T18:22:03,291 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 189 completed 2024-12-07T18:22:03,291 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-07T18:22:03,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] procedure2.ProcedureExecutor(1098): Stored pid=193, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:22:03,292 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=193, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:22:03,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-12-07T18:22:03,293 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=193, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:22:03,294 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,296 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C, FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/recovered.edits] 2024-12-07T18:22:03,298 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/947a1d256aa34a6b8f9567cd5137bfd3 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/947a1d256aa34a6b8f9567cd5137bfd3 2024-12-07T18:22:03,299 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/aa35d74631854e0e85b79a31a31b3023 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/aa35d74631854e0e85b79a31a31b3023 2024-12-07T18:22:03,300 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/c635d52c92784d02bb8d607e03d83bc4 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/c635d52c92784d02bb8d607e03d83bc4 2024-12-07T18:22:03,301 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cf22918f29ed4292851f806eec725cfb to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/A/cf22918f29ed4292851f806eec725cfb 2024-12-07T18:22:03,303 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/00cb2c30ac7544239b0e13770f2f918e to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/00cb2c30ac7544239b0e13770f2f918e 
2024-12-07T18:22:03,304 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/1787ee47cd8a443eb2efc9a285dbdce9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/1787ee47cd8a443eb2efc9a285dbdce9 2024-12-07T18:22:03,305 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/2752f229851149d98374be86bdcfdabc to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/2752f229851149d98374be86bdcfdabc 2024-12-07T18:22:03,305 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/8623ee3dcd1d49bdb5cca56caced6d57 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/B/8623ee3dcd1d49bdb5cca56caced6d57 2024-12-07T18:22:03,307 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5c963538c9b4495eb53d90a4c75af114 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5c963538c9b4495eb53d90a4c75af114 2024-12-07T18:22:03,308 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5ec7f5d1e48f4cde8c5708ab55069bd9 to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/5ec7f5d1e48f4cde8c5708ab55069bd9 2024-12-07T18:22:03,309 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/89903543f67f4144b53b7b7b5131bd9b to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/89903543f67f4144b53b7b7b5131bd9b 2024-12-07T18:22:03,310 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/ea32e89844fb464585a3d714ca44f50c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/C/ea32e89844fb464585a3d714ca44f50c 2024-12-07T18:22:03,312 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/recovered.edits/340.seqid to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c/recovered.edits/340.seqid 2024-12-07T18:22:03,313 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/default/TestAcidGuarantees/5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,313 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-07T18:22:03,313 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-07T18:22:03,314 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-07T18:22:03,316 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412071fb35bca3e524b64b7d3a9ff43419fff_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412071fb35bca3e524b64b7d3a9ff43419fff_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,317 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412073014fde487424510bf3d078aa2aaf3a0_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412073014fde487424510bf3d078aa2aaf3a0_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,318 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120743d2d3433fa443e1894a26d310d99276_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120743d2d3433fa443e1894a26d310d99276_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,319 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074b8e19d1057c410988a95c5a3429a791_5f25402e6c4eaa56d1d09719bc4c6a4c to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074b8e19d1057c410988a95c5a3429a791_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,320 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074d9f0bdf286448199fb12b2da1aa6d25_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412074d9f0bdf286448199fb12b2da1aa6d25_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,321 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207511de81b68de4374b7467f07936c58cf_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207511de81b68de4374b7467f07936c58cf_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,322 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412075270c6a3b1ae4b73b848c14438bf6e60_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412075270c6a3b1ae4b73b848c14438bf6e60_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,322 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207712da428e42b45ce9740f14f6eece605_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207712da428e42b45ce9740f14f6eece605_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,323 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412079819badbaf244a799bb41409a8ad14cc_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412079819badbaf244a799bb41409a8ad14cc_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,324 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207af566f1a92c341ed9eb5a600dc2a202f_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207af566f1a92c341ed9eb5a600dc2a202f_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,325 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207b3e61a487378473d8e525dd7b47c6aef_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207b3e61a487378473d8e525dd7b47c6aef_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,326 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207cd4135bdae9d42628ae7013a33a689e4_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207cd4135bdae9d42628ae7013a33a689e4_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,327 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207d7ca01f0437e4ad7873deac29c1a335d_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207d7ca01f0437e4ad7873deac29c1a335d_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,328 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207db1df40cd20441ac9bd28db6ecf37eef_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207db1df40cd20441ac9bd28db6ecf37eef_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,328 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207ddf2203d9703436aa7c827b6c7cf3c37_5f25402e6c4eaa56d1d09719bc4c6a4c to 
hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207ddf2203d9703436aa7c827b6c7cf3c37_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,329 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207e4b932202c3743a6b5fa684ea06b25b5_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207e4b932202c3743a6b5fa684ea06b25b5_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,330 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207f4df79edb9cc46ef96ee59c1d7eea0b7_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207f4df79edb9cc46ef96ee59c1d7eea0b7_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,331 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207fa9e90f7bb3042ac92b5ab38ab83d510_5f25402e6c4eaa56d1d09719bc4c6a4c to hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241207fa9e90f7bb3042ac92b5ab38ab83d510_5f25402e6c4eaa56d1d09719bc4c6a4c 2024-12-07T18:22:03,331 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-07T18:22:03,333 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=193, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:22:03,335 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-07T18:22:03,337 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-07T18:22:03,337 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=193, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:22:03,337 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-12-07T18:22:03,338 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733595723337"}]},"ts":"9223372036854775807"} 2024-12-07T18:22:03,339 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-07T18:22:03,339 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5f25402e6c4eaa56d1d09719bc4c6a4c, NAME => 'TestAcidGuarantees,,1733595694894.5f25402e6c4eaa56d1d09719bc4c6a4c.', STARTKEY => '', ENDKEY => ''}] 2024-12-07T18:22:03,339 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-07T18:22:03,339 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733595723339"}]},"ts":"9223372036854775807"} 2024-12-07T18:22:03,340 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-07T18:22:03,342 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=193, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-07T18:22:03,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 51 msec 2024-12-07T18:22:03,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35545 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-12-07T18:22:03,394 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 193 completed 2024-12-07T18:22:03,403 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=240 (was 238) - Thread LEAK? -, OpenFileDescriptor=457 (was 446) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=395 (was 437), ProcessCount=11 (was 11), AvailableMemoryMB=6877 (was 6897) 2024-12-07T18:22:03,403 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-07T18:22:03,403 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-07T18:22:03,403 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0870ca2a to 127.0.0.1:56016 2024-12-07T18:22:03,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:22:03,403 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T18:22:03,403 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=67080707, stopped=false 2024-12-07T18:22:03,403 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=8a7a030b35db,35545,1733595541544 2024-12-07T18:22:03,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T18:22:03,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T18:22:03,405 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-07T18:22:03,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:22:03,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:22:03,406 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T18:22:03,406 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T18:22:03,406 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:22:03,406 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '8a7a030b35db,45237,1733595542335' ***** 2024-12-07T18:22:03,406 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-07T18:22:03,406 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T18:22:03,406 INFO [RS:0;8a7a030b35db:45237 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T18:22:03,407 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-07T18:22:03,407 INFO [RS:0;8a7a030b35db:45237 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
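The ResourceChecker summary that closes the test above (Thread=240 (was 238), OpenFileDescriptor=457 (was 446), MaxFileDescriptor=1048576, ...) compares thread and file-descriptor counts taken before and after the test to flag leaks. The snippet below is only a rough, hedged way to capture comparable numbers with plain JDK management beans; it is not the ResourceChecker implementation.

import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;

public class ResourceSnapshotSketch {
  public static void main(String[] args) {
    // Live JVM thread count.
    int threads = ManagementFactory.getThreadMXBean().getThreadCount();

    // Open/max file descriptors are only exposed through the com.sun.management
    // extension, and only on Unix-like platforms.
    long openFds = -1, maxFds = -1;
    OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
    if (os instanceof com.sun.management.UnixOperatingSystemMXBean) {
      com.sun.management.UnixOperatingSystemMXBean unix =
          (com.sun.management.UnixOperatingSystemMXBean) os;
      openFds = unix.getOpenFileDescriptorCount();
      maxFds = unix.getMaxFileDescriptorCount();
    }
    System.out.printf("Thread=%d, OpenFileDescriptor=%d, MaxFileDescriptor=%d%n",
        threads, openFds, maxFds);
  }
}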
2024-12-07T18:22:03,407 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(3579): Received CLOSE for e84b61442b688dc7f09be574fc7d8389 2024-12-07T18:22:03,407 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1224): stopping server 8a7a030b35db,45237,1733595542335 2024-12-07T18:22:03,407 DEBUG [RS:0;8a7a030b35db:45237 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:22:03,407 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T18:22:03,407 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T18:22:03,407 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T18:22:03,407 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-07T18:22:03,407 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing e84b61442b688dc7f09be574fc7d8389, disabling compactions & flushes 2024-12-07T18:22:03,407 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 2024-12-07T18:22:03,407 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 2024-12-07T18:22:03,407 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. after waiting 0 ms 2024-12-07T18:22:03,407 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 
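Shortly before the shutdown sequence above began, the client side logged "Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 193 completed", i.e. the test blocked until the master's DeleteTableProcedure finished. A hedged sketch of the Admin calls that drive such a procedure is shown below; the ZooKeeper quorum setting is a placeholder, and only the table name is taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");   // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // a table must be disabled before it can be deleted
        }
        admin.deleteTable(table);      // returns once the delete-table procedure completes
      }
    }
  }
}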
2024-12-07T18:22:03,407 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-07T18:22:03,407 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing e84b61442b688dc7f09be574fc7d8389 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-07T18:22:03,407 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1603): Online Regions={e84b61442b688dc7f09be574fc7d8389=hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389., 1588230740=hbase:meta,,1.1588230740} 2024-12-07T18:22:03,408 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-07T18:22:03,408 INFO [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-07T18:22:03,408 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-07T18:22:03,408 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T18:22:03,408 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T18:22:03,408 INFO [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-07T18:22:03,411 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, e84b61442b688dc7f09be574fc7d8389 2024-12-07T18:22:03,423 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/namespace/e84b61442b688dc7f09be574fc7d8389/.tmp/info/b7268e1ac36c4ff8928ae8f918bbe6c3 is 45, key is default/info:d/1733595547001/Put/seqid=0 2024-12-07T18:22:03,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742488_1664 (size=5037) 2024-12-07T18:22:03,429 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/.tmp/info/2baf7e70ec924e2fb6f8e72db2070a70 is 143, key is hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389./info:regioninfo/1733595546889/Put/seqid=0 2024-12-07T18:22:03,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742489_1665 (size=7725) 2024-12-07T18:22:03,479 INFO [regionserver/8a7a030b35db:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T18:22:03,611 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, e84b61442b688dc7f09be574fc7d8389 2024-12-07T18:22:03,811 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, e84b61442b688dc7f09be574fc7d8389 2024-12-07T18:22:03,827 INFO 
[RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/namespace/e84b61442b688dc7f09be574fc7d8389/.tmp/info/b7268e1ac36c4ff8928ae8f918bbe6c3 2024-12-07T18:22:03,830 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/namespace/e84b61442b688dc7f09be574fc7d8389/.tmp/info/b7268e1ac36c4ff8928ae8f918bbe6c3 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/namespace/e84b61442b688dc7f09be574fc7d8389/info/b7268e1ac36c4ff8928ae8f918bbe6c3 2024-12-07T18:22:03,833 INFO [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/.tmp/info/2baf7e70ec924e2fb6f8e72db2070a70 2024-12-07T18:22:03,833 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/namespace/e84b61442b688dc7f09be574fc7d8389/info/b7268e1ac36c4ff8928ae8f918bbe6c3, entries=2, sequenceid=6, filesize=4.9 K 2024-12-07T18:22:03,834 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for e84b61442b688dc7f09be574fc7d8389 in 427ms, sequenceid=6, compaction requested=false 2024-12-07T18:22:03,837 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/namespace/e84b61442b688dc7f09be574fc7d8389/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T18:22:03,837 INFO [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 2024-12-07T18:22:03,837 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for e84b61442b688dc7f09be574fc7d8389: 2024-12-07T18:22:03,838 DEBUG [RS_CLOSE_REGION-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733595545645.e84b61442b688dc7f09be574fc7d8389. 
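In the records above, closing the hbase:namespace region first flushes its ~78 B memstore to an hfile under .tmp, commits the file into the info family directory, and writes a recovered.edits/9.seqid marker before the region is reported closed. That flush is triggered by the close itself; the same kind of flush can also be requested explicitly from client code, as in this small hedged sketch (the table name is reused from the log, everything else is generic):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the region server(s) to flush the table's memstores to hfiles.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}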
2024-12-07T18:22:03,851 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/.tmp/rep_barrier/75a44fbd6781495e95d367e1d0decc99 is 102, key is TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb./rep_barrier:/1733595572336/DeleteFamily/seqid=0 2024-12-07T18:22:03,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742490_1666 (size=6025) 2024-12-07T18:22:04,012 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-07T18:22:04,212 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-07T18:22:04,254 INFO [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/.tmp/rep_barrier/75a44fbd6781495e95d367e1d0decc99 2024-12-07T18:22:04,272 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/.tmp/table/a7b524174f924527b9c3d6ba572db661 is 96, key is TestAcidGuarantees,,1733595547235.a05999984107cee49bb0b7292dd34cbb./table:/1733595572336/DeleteFamily/seqid=0 2024-12-07T18:22:04,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742491_1667 (size=5942) 2024-12-07T18:22:04,296 INFO [regionserver/8a7a030b35db:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T18:22:04,296 INFO [regionserver/8a7a030b35db:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T18:22:04,412 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-07T18:22:04,412 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T18:22:04,412 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-07T18:22:04,612 DEBUG [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-07T18:22:04,676 INFO [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/.tmp/table/a7b524174f924527b9c3d6ba572db661 2024-12-07T18:22:04,679 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/.tmp/info/2baf7e70ec924e2fb6f8e72db2070a70 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/info/2baf7e70ec924e2fb6f8e72db2070a70 2024-12-07T18:22:04,682 INFO [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/info/2baf7e70ec924e2fb6f8e72db2070a70, entries=22, sequenceid=93, filesize=7.5 K 2024-12-07T18:22:04,682 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/.tmp/rep_barrier/75a44fbd6781495e95d367e1d0decc99 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/rep_barrier/75a44fbd6781495e95d367e1d0decc99 2024-12-07T18:22:04,685 INFO [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/rep_barrier/75a44fbd6781495e95d367e1d0decc99, entries=6, sequenceid=93, filesize=5.9 K 2024-12-07T18:22:04,685 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/.tmp/table/a7b524174f924527b9c3d6ba572db661 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/table/a7b524174f924527b9c3d6ba572db661 2024-12-07T18:22:04,687 INFO [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/table/a7b524174f924527b9c3d6ba572db661, entries=9, sequenceid=93, filesize=5.8 K 2024-12-07T18:22:04,688 INFO [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1280ms, sequenceid=93, compaction requested=false 2024-12-07T18:22:04,691 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-07T18:22:04,691 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T18:22:04,692 INFO [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-07T18:22:04,692 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-07T18:22:04,692 DEBUG [RS_CLOSE_META-regionserver/8a7a030b35db:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T18:22:04,813 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1250): stopping server 8a7a030b35db,45237,1733595542335; all regions closed. 
2024-12-07T18:22:04,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741834_1010 (size=26050) 2024-12-07T18:22:04,818 DEBUG [RS:0;8a7a030b35db:45237 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/oldWALs 2024-12-07T18:22:04,818 INFO [RS:0;8a7a030b35db:45237 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 8a7a030b35db%2C45237%2C1733595542335.meta:.meta(num 1733595545363) 2024-12-07T18:22:04,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741832_1008 (size=15747101) 2024-12-07T18:22:04,821 DEBUG [RS:0;8a7a030b35db:45237 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/oldWALs 2024-12-07T18:22:04,821 INFO [RS:0;8a7a030b35db:45237 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 8a7a030b35db%2C45237%2C1733595542335:(num 1733595544455) 2024-12-07T18:22:04,821 DEBUG [RS:0;8a7a030b35db:45237 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:22:04,821 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T18:22:04,822 INFO [RS:0;8a7a030b35db:45237 {}] hbase.ChoreService(370): Chore service for: regionserver/8a7a030b35db:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T18:22:04,822 INFO [regionserver/8a7a030b35db:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-07T18:22:04,822 INFO [RS:0;8a7a030b35db:45237 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45237 2024-12-07T18:22:04,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8a7a030b35db,45237,1733595542335 2024-12-07T18:22:04,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T18:22:04,827 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007fb7388f34e8@e606720 rejected from java.util.concurrent.ThreadPoolExecutor@4b62f888[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 15] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-07T18:22:04,827 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8a7a030b35db,45237,1733595542335] 2024-12-07T18:22:04,827 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 8a7a030b35db,45237,1733595542335; numProcessing=1 2024-12-07T18:22:04,829 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/8a7a030b35db,45237,1733595542335 already deleted, retry=false 2024-12-07T18:22:04,829 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 8a7a030b35db,45237,1733595542335 expired; onlineServers=0 2024-12-07T18:22:04,829 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '8a7a030b35db,35545,1733595541544' ***** 2024-12-07T18:22:04,829 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T18:22:04,829 DEBUG [M:0;8a7a030b35db:35545 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@238f856c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8a7a030b35db/172.17.0.2:0 2024-12-07T18:22:04,829 INFO [M:0;8a7a030b35db:35545 {}] regionserver.HRegionServer(1224): stopping server 8a7a030b35db,35545,1733595541544 2024-12-07T18:22:04,829 INFO [M:0;8a7a030b35db:35545 {}] regionserver.HRegionServer(1250): stopping server 8a7a030b35db,35545,1733595541544; all regions closed. 2024-12-07T18:22:04,829 DEBUG [M:0;8a7a030b35db:35545 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T18:22:04,829 DEBUG [M:0;8a7a030b35db:35545 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T18:22:04,829 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
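The shutdown above is coordinated through ZooKeeper: when the region server process stops, its ephemeral znode under /hbase/rs/... disappears, and the master's RegionServerTracker reacts to the resulting NodeDeleted/NodeChildrenChanged events by processing the server's expiration. Below is a minimal, generic sketch of watching a znode for deletion with the plain ZooKeeper client; the connection string and path are placeholders, and this is not HBase's ZKWatcher.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeDeletionWatchSketch {
  public static void main(String[] args) throws Exception {
    final CountDownLatch deleted = new CountDownLatch(1);
    final String path = "/hbase/rs/example-server";     // placeholder znode path

    // The constructor's watcher also receives connection-state events; we only
    // react to the deletion of the node we are interested in.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && path.equals(event.getPath())) {
        deleted.countDown();
      }
    });

    zk.exists(path, true);   // register a one-shot watch using the default watcher
    deleted.await();         // unblocks once the (ephemeral) node is removed
    zk.close();
  }
}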
2024-12-07T18:22:04,829 DEBUG [M:0;8a7a030b35db:35545 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T18:22:04,829 DEBUG [master/8a7a030b35db:0:becomeActiveMaster-HFileCleaner.small.0-1733595544120 {}] cleaner.HFileCleaner(306): Exit Thread[master/8a7a030b35db:0:becomeActiveMaster-HFileCleaner.small.0-1733595544120,5,FailOnTimeoutGroup] 2024-12-07T18:22:04,829 DEBUG [master/8a7a030b35db:0:becomeActiveMaster-HFileCleaner.large.0-1733595544120 {}] cleaner.HFileCleaner(306): Exit Thread[master/8a7a030b35db:0:becomeActiveMaster-HFileCleaner.large.0-1733595544120,5,FailOnTimeoutGroup] 2024-12-07T18:22:04,829 INFO [M:0;8a7a030b35db:35545 {}] hbase.ChoreService(370): Chore service for: master/8a7a030b35db:0 had [] on shutdown 2024-12-07T18:22:04,829 DEBUG [M:0;8a7a030b35db:35545 {}] master.HMaster(1733): Stopping service threads 2024-12-07T18:22:04,830 INFO [M:0;8a7a030b35db:35545 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T18:22:04,830 ERROR [M:0;8a7a030b35db:35545 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-12-07T18:22:04,830 INFO [M:0;8a7a030b35db:35545 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T18:22:04,830 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T18:22:04,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T18:22:04,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T18:22:04,831 DEBUG [M:0;8a7a030b35db:35545 {}] zookeeper.ZKUtil(347): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T18:22:04,831 WARN [M:0;8a7a030b35db:35545 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T18:22:04,831 INFO [M:0;8a7a030b35db:35545 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-07T18:22:04,831 INFO [M:0;8a7a030b35db:35545 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T18:22:04,831 DEBUG [M:0;8a7a030b35db:35545 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T18:22:04,831 INFO [M:0;8a7a030b35db:35545 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T18:22:04,831 DEBUG [M:0;8a7a030b35db:35545 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T18:22:04,831 DEBUG [M:0;8a7a030b35db:35545 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-07T18:22:04,831 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T18:22:04,831 DEBUG [M:0;8a7a030b35db:35545 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T18:22:04,831 INFO [M:0;8a7a030b35db:35545 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=792.18 KB heapSize=974.72 KB 2024-12-07T18:22:04,846 DEBUG [M:0;8a7a030b35db:35545 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a019ace3ac034a548fa4f6db20b125a4 is 82, key is hbase:meta,,1/info:regioninfo/1733595545517/Put/seqid=0 2024-12-07T18:22:04,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742492_1668 (size=5672) 2024-12-07T18:22:04,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T18:22:04,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45237-0x1006db465c50001, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T18:22:04,928 INFO [RS:0;8a7a030b35db:45237 {}] regionserver.HRegionServer(1307): Exiting; stopping=8a7a030b35db,45237,1733595542335; zookeeper connection closed. 2024-12-07T18:22:04,928 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@b528ca6 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@b528ca6 2024-12-07T18:22:04,929 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-07T18:22:05,250 INFO [M:0;8a7a030b35db:35545 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2241 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a019ace3ac034a548fa4f6db20b125a4 2024-12-07T18:22:05,269 DEBUG [M:0;8a7a030b35db:35545 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c2e7f70475594ed1b3458387c4bd7783 is 2283, key is \x00\x00\x00\x00\x00\x00\x00&/proc:d/1733595574286/Put/seqid=0 2024-12-07T18:22:05,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742493_1669 (size=47498) 2024-12-07T18:22:05,674 INFO [M:0;8a7a030b35db:35545 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=791.63 KB at sequenceid=2241 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c2e7f70475594ed1b3458387c4bd7783 2024-12-07T18:22:05,676 INFO [M:0;8a7a030b35db:35545 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 
c2e7f70475594ed1b3458387c4bd7783 2024-12-07T18:22:05,691 DEBUG [M:0;8a7a030b35db:35545 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c6a01846142d40319dd91b25b29e7777 is 69, key is 8a7a030b35db,45237,1733595542335/rs:state/1733595544224/Put/seqid=0 2024-12-07T18:22:05,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073742494_1670 (size=5156) 2024-12-07T18:22:06,095 INFO [M:0;8a7a030b35db:35545 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2241 (bloomFilter=true), to=hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c6a01846142d40319dd91b25b29e7777 2024-12-07T18:22:06,098 DEBUG [M:0;8a7a030b35db:35545 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a019ace3ac034a548fa4f6db20b125a4 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a019ace3ac034a548fa4f6db20b125a4 2024-12-07T18:22:06,101 INFO [M:0;8a7a030b35db:35545 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a019ace3ac034a548fa4f6db20b125a4, entries=8, sequenceid=2241, filesize=5.5 K 2024-12-07T18:22:06,101 DEBUG [M:0;8a7a030b35db:35545 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c2e7f70475594ed1b3458387c4bd7783 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c2e7f70475594ed1b3458387c4bd7783 2024-12-07T18:22:06,103 INFO [M:0;8a7a030b35db:35545 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c2e7f70475594ed1b3458387c4bd7783 2024-12-07T18:22:06,103 INFO [M:0;8a7a030b35db:35545 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c2e7f70475594ed1b3458387c4bd7783, entries=193, sequenceid=2241, filesize=46.4 K 2024-12-07T18:22:06,104 DEBUG [M:0;8a7a030b35db:35545 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c6a01846142d40319dd91b25b29e7777 as hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c6a01846142d40319dd91b25b29e7777 2024-12-07T18:22:06,106 INFO [M:0;8a7a030b35db:35545 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39489/user/jenkins/test-data/c5bb22ba-625f-36c4-fe46-d40e2a1ec5c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c6a01846142d40319dd91b25b29e7777, entries=1, 
sequenceid=2241, filesize=5.0 K 2024-12-07T18:22:06,107 INFO [M:0;8a7a030b35db:35545 {}] regionserver.HRegion(3040): Finished flush of dataSize ~792.18 KB/811197, heapSize ~974.42 KB/997808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1276ms, sequenceid=2241, compaction requested=false 2024-12-07T18:22:06,108 INFO [M:0;8a7a030b35db:35545 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T18:22:06,108 DEBUG [M:0;8a7a030b35db:35545 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-07T18:22:06,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741830_1006 (size=958157) 2024-12-07T18:22:06,110 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-07T18:22:06,110 INFO [M:0;8a7a030b35db:35545 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-07T18:22:06,110 INFO [M:0;8a7a030b35db:35545 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35545 2024-12-07T18:22:06,112 DEBUG [M:0;8a7a030b35db:35545 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/8a7a030b35db,35545,1733595541544 already deleted, retry=false 2024-12-07T18:22:06,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T18:22:06,214 INFO [M:0;8a7a030b35db:35545 {}] regionserver.HRegionServer(1307): Exiting; stopping=8a7a030b35db,35545,1733595541544; zookeeper connection closed. 2024-12-07T18:22:06,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35545-0x1006db465c50000, quorum=127.0.0.1:56016, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T18:22:06,219 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29607158{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T18:22:06,221 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76b7aca8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T18:22:06,221 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T18:22:06,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74536f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T18:22:06,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ac85cee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/hadoop.log.dir/,STOPPED} 2024-12-07T18:22:06,224 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T18:22:06,224 WARN [BP-737318815-172.17.0.2-1733595538639 heartbeating to localhost/127.0.0.1:39489 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T18:22:06,224 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T18:22:06,224 WARN [BP-737318815-172.17.0.2-1733595538639 heartbeating to localhost/127.0.0.1:39489 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-737318815-172.17.0.2-1733595538639 (Datanode Uuid 9fda8575-3961-4bd1-9383-11e803969db3) service to localhost/127.0.0.1:39489 2024-12-07T18:22:06,227 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/cluster_89eff866-5171-1256-88aa-0954626babf5/dfs/data/data1/current/BP-737318815-172.17.0.2-1733595538639 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T18:22:06,227 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/cluster_89eff866-5171-1256-88aa-0954626babf5/dfs/data/data2/current/BP-737318815-172.17.0.2-1733595538639 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T18:22:06,228 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T18:22:06,237 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6904431c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T18:22:06,238 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20178447{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T18:22:06,238 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T18:22:06,239 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@704acb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T18:22:06,239 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@106ffc0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b6eda534-652d-0730-4d4e-d9bc1d4dac62/hadoop.log.dir/,STOPPED} 2024-12-07T18:22:06,264 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-07T18:22:06,403 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
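The final records show HBaseTestingUtility tearing the whole minicluster down: the region server and master are stopped, the embedded DataNode and NameNode web contexts are shut down, the MiniZK cluster is stopped, and the run ends with "Minicluster is down". For reference, a hedged sketch of the test-scaffolding calls that bracket a run like this; HBaseTestingUtility comes from the hbase-server test jar, and the table created here is illustrative rather than the TestAcidGuarantees fixture.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();                  // spins up ZK, HDFS, a master and a region server
    try {
      // Create a simple one-family table and close the handle again.
      Table table = util.createTable(TableName.valueOf("SketchTable"), Bytes.toBytes("A"));
      table.close();
    } finally {
      util.shutdownMiniCluster();             // produces a teardown like the one logged above
    }
  }
}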